Projects/worblehat-old
Projects
/
worblehat-old
Archived
12
0
Fork 0

Faset over fra Z3950 til Google Books

This commit is contained in:
Øyvind Almelid 2010-09-23 13:57:37 +00:00
parent d173b9396e
commit 644e79f9b5
221 changed files with 60948 additions and 27273 deletions

View File

@ -1,987 +0,0 @@
#!/usr/bin/python
# Author: Rob Sanderson (azaroth@liv.ac.uk)
# Distributed and Usable under the GPL
# Version: 1.7
# Most Recent Changes: contexts, new modifier style for 1.1
#
# With thanks to Adam from IndexData and Mike Taylor for their valuable input
from shlex import shlex
from xml.sax.saxutils import escape
from xml.dom.minidom import Node, parseString
from PyZ3950.SRWDiagnostics import *
# Don't use cStringIO as it borks Unicode (apparently)
from StringIO import StringIO
import types
# Parsing strictness flags
# Each flag toggles whether a questionable construct is a hard error (1)
# or silently tolerated (0).
errorOnEmptyTerm = 0 # index = "" (often meaningless)
errorOnQuotedIdentifier = 0 # "/foo/bar" = "" (unnecessary BNF restriction)
errorOnDuplicatePrefix = 0 # >a=b >a=c "" (impossible due to BNF)
fullResultSetNameCheck = 1 # srw.rsn=foo and srw.rsn=foo (mutant!!)

# Base values for CQL
# Defaults used when a clause is just a bare term ("fish"):
serverChoiceRelation = "scr"
serverChoiceIndex = "cql.serverchoice"
# Comparison symbols recognised as modifier comparisons.
order = ['=', '>', '>=', '<', '<=', '<>']
modifierSeparator = "/"
# Boolean operators of CQL 1.1.
booleans = ['and', 'or', 'not', 'prox']
# Context sets that are always resolvable without an explicit map.
reservedPrefixes = {"srw" : "http://www.loc.gov/zing/cql/srw-indexes/v1.0/",
                    "cql" : "info:srw/cql-context-set/1/cql-v1.1"}
XCQLNamespace = "http://www.loc.gov/zing/cql/xcql/"

# End of 'configurable' stuff
class PrefixableObject:
    "Root object for triple and searchClause"
    prefixes = {}
    parent = None
    config = None

    def __init__(self):
        # Give each instance its own state; the class attributes above are
        # only placeholders.
        self.prefixes = {}
        self.parent = None
        self.config = None

    def toXCQL(self, depth=0):
        """Serialise just the prefix map as an XCQL <prefixes> element."""
        space = " " * depth
        pieces = ['%s<prefixes>\n' % (space)]
        for name in self.prefixes.keys():
            pieces.append("%s <prefix>\n%s <name>%s</name>\n%s <identifier>%s</identifier>\n%s </prefix>\n" % (space, space, escape(name), space, escape(self.prefixes[name]), space))
        pieces.append("%s</prefixes>\n" % (space))
        return ''.join(pieces)

    def addPrefix(self, name, identifier):
        """Attach a prefix -> identifier map to this node.

        Raises Diagnostic45 for duplicate/reserved names when
        errorOnDuplicatePrefix is set.
        """
        if errorOnDuplicatePrefix and (name in self.prefixes or name in reservedPrefixes):
            diag = Diagnostic45()
            diag.details = name
            raise diag
        self.prefixes[name] = identifier

    def resolvePrefix(self, name):
        """Resolve a prefix: reserved set, own map, then parents, then config."""
        if name in reservedPrefixes:
            return reservedPrefixes[name]
        if name in self.prefixes:
            return self.prefixes[name]
        if self.parent is not None:
            return self.parent.resolvePrefix(name)
        if self.config is not None:
            # Config is some sort of server config which specifies defaults
            return self.config.resolvePrefix(name)
        # Top of tree with no config: leave unresolved rather than raising,
        # as a client-side query may legitimately carry unknown prefixes.
        #diag = Diagnostic15()
        #diag.details = name
        #raise diag
        return None
class PrefixedObject:
    """Root object for relation, relationModifier and index.

    Holds a possibly prefixed value such as 'dc.title', split into
    .prefix ('dc') and .value ('title').
    """
    prefix = ""
    prefixURI = ""
    value = ""
    parent = None

    def __init__(self, val):
        # All prefixed things are case insensitive
        val = val.lower()
        if val and val[0] == '"' and val[-1] == '"':
            if errorOnQuotedIdentifier:
                diag = Diagnostic14()
                diag.details = val
                raise diag
            else:
                val = val[1:-1]
        self.value = val
        self.splitValue()

    def __str__(self):
        if (self.prefix):
            return "%s.%s" % (self.prefix, self.value)
        else:
            return self.value

    def splitValue(self):
        """Split 'prefix.value' into its two parts.

        Raises Diagnostic15 for multiple dots or an empty prefix part.
        """
        f = self.value.find(".")
        if (self.value.count('.') > 1):
            diag = Diagnostic15()
            diag.details = "Multiple '.' characters: %s" % (self.value)
            raise diag
        elif (f == 0):
            diag = Diagnostic15()
            # BUG FIX: the original read 'irt.index' here -- an undefined
            # name -- turning this diagnostic into a NameError.
            diag.details = "Null indexset: %s" % (self.value)
            raise diag
        elif f >= 0:
            self.prefix = self.value[:f].lower()
            self.value = self.value[f+1:].lower()

    def resolvePrefix(self):
        """Resolve and cache the identifier URI for our prefix via the parent."""
        if (not self.prefixURI):
            self.prefixURI = self.parent.resolvePrefix(self.prefix)
        return self.prefixURI
class ModifiableObject:
    """Mixin giving keyed access to a node's modifier list.

    Integer keys index the list positionally (None when out of range);
    any other key matches a modifier whose type -- prefixed ('cql.stem')
    or bare ('stem') -- equals the key.
    """
    # Treat modifiers as keys on boolean/relation?
    modifiers = []

    def __getitem__(self, k):
        # Use isinstance rather than the removed types.IntType comparison;
        # also accepts int subclasses (bool, Python 2 long via promotion).
        if isinstance(k, int):
            try:
                return self.modifiers[k]
            # Only out-of-range is expected here; a bare except hid real bugs.
            except IndexError:
                return None
        for m in self.modifiers:
            if (str(m.type) == k or m.type.value == k):
                return m
        return None
class Triple (PrefixableObject):
    "Object to represent a CQL triple"
    # leftOperand / rightOperand: Triple or SearchClause nodes.
    leftOperand = None
    # boolean: a Boolean node joining the two operands.
    boolean = None
    rightOperand = None

    def toXCQL(self, depth=0):
        "Create the XCQL representation of the object"
        space = " " * depth
        if (depth == 0):
            # Root of the tree carries the XCQL namespace declaration.
            xml = ['<triple xmlns="%s">\n' % (XCQLNamespace)]
        else:
            xml = ['%s<triple>\n' % (space)]
        if self.prefixes:
            xml.append(PrefixableObject.toXCQL(self, depth+1))
        xml.append(self.boolean.toXCQL(depth+1))
        xml.append("%s <leftOperand>\n" % (space))
        xml.append(self.leftOperand.toXCQL(depth+2))
        xml.append("%s </leftOperand>\n" % (space))
        xml.append("%s <rightOperand>\n" % (space))
        xml.append(self.rightOperand.toXCQL(depth+2))
        xml.append("%s </rightOperand>\n" % (space))
        xml.append("%s</triple>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        # Regenerate the parenthesised CQL text for this subtree,
        # emitting any prefix maps (>p="uri") first.
        txt = []
        if (self.prefixes):
            for p in self.prefixes.keys():
                if (p <> ''):
                    txt.append('>%s="%s"' % (p, self.prefixes[p]))
                else:
                    # Anonymous (default) prefix map: >"uri"
                    txt.append('>"%s"' % (self.prefixes[p]))
            prefs = ' '.join(txt)
            return "(%s %s %s %s)" % (prefs, self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())
        else:
            return "(%s %s %s)" % (self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())

    def getResultSetId(self, top=None):
        # Detect the degenerate form where every clause in the subtree is
        # 'cql.resultSetId = X' with the same X (effectively a 'present'
        # request). Top-level call returns that id or ""; recursive calls
        # return a list of ids for the caller to reduce.
        if fullResultSetNameCheck == 0 or self.boolean.value in ['not', 'prox']:
            return ""

        if top == None:
            topLevel = 1
            top = self;
        else:
            topLevel = 0

        # Iterate over operands and build a list
        rsList = []
        if isinstance(self.leftOperand, Triple):
            rsList.extend(self.leftOperand.getResultSetId(top))
        else:
            rsList.append(self.leftOperand.getResultSetId(top))
        if isinstance(self.rightOperand, Triple):
            rsList.extend(self.rightOperand.getResultSetId(top))
        else:
            rsList.append(self.rightOperand.getResultSetId(top))

        if topLevel == 1:
            # Check all elements are the same, if so we're a fubar form of present
            if (len(rsList) == rsList.count(rsList[0])):
                return rsList[0]
            else:
                return ""
        else:
            return rsList
class SearchClause (PrefixableObject):
    "Object to represent a CQL searchClause"
    # index: Index node; relation: Relation node; term: Term node.
    index = None
    relation = None
    term = None

    def __init__(self, ind, rel, t):
        PrefixableObject.__init__(self)
        self.index = ind
        self.relation = rel
        self.term = t
        # Children point back here so prefix resolution can climb the tree.
        ind.parent = self
        rel.parent = self
        t.parent = self

    def toXCQL(self, depth=0):
        "Produce XCQL version of the object"
        space = " " * depth
        if (depth == 0):
            # Root of the tree carries the XCQL namespace declaration.
            xml = ['<searchClause xmlns="%s">\n' % (XCQLNamespace)]
        else:
            xml = ['%s<searchClause>\n' % (space)]
        if self.prefixes:
            xml.append(PrefixableObject.toXCQL(self, depth+1))
        xml.append(self.index.toXCQL(depth+1))
        xml.append(self.relation.toXCQL(depth+1))
        xml.append(self.term.toXCQL(depth+1))
        xml.append("%s</searchClause>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        # Prefix maps first, then 'index relation "term"'.
        text = []
        for p in self.prefixes.keys():
            if (p <> ''):
                text.append('>%s="%s"' % (p, self.prefixes[p]))
            else:
                # Anonymous (default) prefix map: >"uri"
                text.append('>"%s"' % (self.prefixes[p]))
        text.append('%s %s "%s"' % (self.index, self.relation.toCQL(), self.term))
        return ' '.join(text)

    def getResultSetId(self, top=None):
        # Leaf case of Triple.getResultSetId: return the term when this
        # clause is 'cql.resultSetId = X', else "".
        idx = self.index
        idx.resolvePrefix()
        if (idx.prefixURI == reservedPrefixes['cql'] and idx.value.lower() == 'resultsetid'):
            return self.term.value
        else:
            return ""
class Index(PrefixedObject):
    """A (possibly prefixed) CQL index, e.g. 'dc.title'."""

    def toXCQL(self, depth=0):
        """Serialise as an XCQL <index> element.

        The XCQL namespace is declared only when this node is the root.
        """
        if (depth == 0):
            ns = ' xmlns="%s"' % (XCQLNamespace)
        else:
            ns = ""
        indent = " " * depth
        return "%s<index%s>%s</index>\n" % (indent, ns, escape(str(self)))

    def toCQL(self):
        """The CQL text form is simply the prefixed name."""
        return str(self)
class Relation(PrefixedObject, ModifiableObject):
    "Object to represent a CQL relation"

    def __init__(self, rel, mods=None):
        """Build a relation (default context set 'cql') with optional modifiers.

        BUG FIX: 'mods' previously defaulted to a shared mutable list
        ([]), so every Relation built without explicit modifiers aliased
        the same list object.
        """
        self.prefix = "cql"
        PrefixedObject.__init__(self, rel)
        if mods is None:
            mods = []
        self.modifiers = mods
        for m in mods:
            m.parent = self

    def toXCQL(self, depth=0):
        "Create XCQL representation of object"
        if (depth == 0):
            # Root of the tree carries the XCQL namespace declaration.
            ns = ' xmlns="%s"' % (XCQLNamespace)
        else:
            ns = ""
        space = " " * depth
        xml = ["%s<relation%s>\n" % (space, ns)]
        xml.append("%s <value>%s</value>\n" % (space, escape(self.value)))
        if self.modifiers:
            xml.append("%s <modifiers>\n" % (space))
            for m in self.modifiers:
                xml.append(m.toXCQL(depth+2))
            xml.append("%s </modifiers>\n" % (space))
        xml.append("%s</relation>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        # 'value/mod1/mod2...' -- modifiers joined with the separator.
        txt = [self.value]
        txt.extend(map(str, self.modifiers))
        return '/'.join(txt)
class Term:
    """A CQL search term (the value side of a searchClause)."""
    value = ""

    def __init__(self, v):
        """Validate and normalise the raw term text.

        Raises Diagnostic25 for a bare relation symbol, Diagnostic32 for
        an anchors-only term, Diagnostic26 for a bad backslash escape and
        Diagnostic27 for a forbidden empty term.
        """
        if (v != ""):
            # Unquoted literal
            if v in ['>=', '<=', '>', '<', '<>', "/", '=']:
                diag = Diagnostic25()
                # BUG FIX: originally reported self.value (still "" at this
                # point) instead of the offending input.
                diag.details = v
                raise diag

            # Check existence of meaningful term
            nonanchor = 0
            for c in v:
                if c != "^":
                    nonanchor = 1
                    break
            if not nonanchor:
                diag = Diagnostic32()
                diag.details = "Only anchoring charater(s) in term: " + v
                raise diag

            # Unescape quotes
            if (v[0] == '"' and v[-1] == '"'):
                v = v[1:-1]
                v = v.replace('\\"', '"')
            if (not v and errorOnEmptyTerm):
                diag = Diagnostic27()
                raise diag

            # Check for badly placed \s
            # BUG FIX: the original loop referenced the undefined name
            # 'irt.term' and reassigned v to the integer result of find(),
            # so any term containing a backslash crashed with a NameError
            # or TypeError instead of being validated.
            idx = v.find("\\")
            while (idx > -1):
                if idx + 1 >= len(v) or v[idx+1] not in ['?', '\\', '*', '^']:
                    diag = Diagnostic26()
                    diag.details = v
                    raise diag
                # Skip past the escaped character so '\\' counts as one escape.
                idx = v.find("\\", idx + 2)
        elif (errorOnEmptyTerm):
            diag = Diagnostic27()
            raise diag
        self.value = v

    def __str__(self):
        return self.value

    def toXCQL(self, depth=0):
        """Serialise as an XCQL <term> element (namespace only at the root)."""
        if (depth == 0):
            ns = ' xmlns="%s"' % (XCQLNamespace)
        else:
            ns = ""
        return "%s<term%s>%s</term>\n" % (" "*depth, ns, escape(self.value))
class Boolean(ModifiableObject):
    "Object to represent a CQL boolean"
    value = ""
    parent = None

    def __init__(self, bool, mods=None):
        """Build a boolean node with optional modifier clauses.

        BUG FIX: 'mods' previously defaulted to a shared mutable list
        ([]), so every Boolean built without explicit modifiers aliased
        the same list object.
        """
        self.value = bool
        if mods is None:
            mods = []
        self.modifiers = mods
        self.parent = None

    def toXCQL(self, depth=0):
        "Create XCQL representation of object"
        space = " " * depth
        xml = ["%s<boolean>\n" % (space)]
        xml.append("%s <value>%s</value>\n" % (space, escape(self.value)))
        if self.modifiers:
            xml.append("%s <modifiers>\n" % (space))
            for m in self.modifiers:
                xml.append(m.toXCQL(depth+2))
            xml.append("%s </modifiers>\n" % (space))
        xml.append("%s</boolean>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        # 'value/mod1/mod2...' -- modifiers joined with the separator.
        txt = [self.value]
        for m in self.modifiers:
            txt.append(m.toCQL())
        return '/'.join(txt)

    def resolvePrefix(self, name):
        # Booleans carry no prefixes; delegate up the tree.
        return self.parent.resolvePrefix(name)
class ModifierType(PrefixedObject):
    """The type half of a modifier clause.

    Same shape as Index, but XCQL serialisation happens in the owning
    ModifierClause rather than here.
    """
    prefix = "cql"
    parent = None
class ModifierClause:
    """A single modifier on a boolean or relation, e.g. /distance<3."""
    parent = None
    type = None
    comparison = ""
    value = ""

    def __init__(self, type, comp="", val=""):
        self.type = ModifierType(type)
        self.type.parent = self
        self.comparison = comp
        self.value = val

    def __str__(self):
        base = str(self.type)
        if not self.value:
            return "%s" % (base)
        return "%s%s%s" % (base, self.comparison, self.value)

    def toXCQL(self, depth=0):
        """Serialise as an XCQL <modifier> element (short form when no value)."""
        outer = " " * depth
        inner = " " * (depth+1)
        if not self.value:
            return "%s<modifier><type>%s</type></modifier>\n" % (outer, escape(str(self.type)))
        return "%s<modifier>\n%s<type>%s</type>\n%s<comparison>%s</comparison>\n%s<value>%s</value>\n%s</modifier>\n" % (outer, inner, escape(str(self.type)), inner, escape(self.comparison), inner, escape(self.value), outer)

    def toCQL(self):
        return str(self)

    def resolvePrefix(self, name):
        # Skip the immediate parent (a boolean or relation, which has its
        # own non-prefixable resolvePrefix) and ask its parent instead.
        return self.parent.parent.resolvePrefix(name)
# Requires changes for: <= >= <>, and escaped \" in "
# From shlex.py (std library for 2.2+)
class CQLshlex(shlex):
    "shlex with additions for CQL parsing"
    # Only double quotes delimit strings; CQL has no comments.
    quotes = '"'
    commenters = ""
    # One-token buffer used when a relation/word boundary is found mid-read.
    nextToken = ""

    def __init__(self, thing):
        shlex.__init__(self, thing)
        # CQL terms may contain most punctuation plus 8-bit characters.
        self.wordchars += "!@#$%^&*-+{}[];,.?|~`:\\"
        self.wordchars += ''.join(map(chr, range(128,254)))

    def read_token(self):
        "Read a token from the input stream (no pushback or inclusions)"
        # States: ' ' whitespace, 'a' word, '<' partial relation,
        # '"' inside quotes, None at EOF.
        while 1:
            if (self.nextToken != ""):
                # Emit the buffered token from a previous boundary split.
                self.token = self.nextToken
                self.nextToken = ""
                # Bah. SUPER ugly non portable
                if self.token == "/":
                    self.state = ' '
                    break
            nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                print "shlex: in state ", repr(self.state), " I see character:", repr(nextchar)
            if self.state is None:
                self.token = '' # past end of file
                break
            elif self.state == ' ':
                if not nextchar:
                    self.state = None # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in whitespace state"
                    if self.token:
                        break # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    self.token = nextchar
                    self.state = nextchar
                elif nextchar in ['<', '>']:
                    # Start of a possibly two-character relation (<=, >=, <>).
                    self.token = nextchar
                    self.state = '<'
                else:
                    # Single-character token (e.g. parentheses, '=', '/').
                    self.token = nextchar
                    if self.token:
                        break # emit current token
                    else:
                        continue
            elif self.state == '<':
                # Only accumulate <=, >= or <>
                if self.token == ">" and nextchar == "=":
                    self.token = self.token + nextchar
                    self.state = ' '
                    break
                elif self.token == "<" and nextchar in ['>', '=']:
                    self.token = self.token + nextchar
                    self.state = ' '
                    break
                elif not nextchar:
                    self.state = None
                    break
                elif nextchar == "/":
                    # Relation finished; buffer the modifier separator.
                    self.state = "/"
                    self.nextToken = "/"
                    break
                elif nextchar in self.wordchars:
                    # Relation finished; buffer the start of the next word.
                    self.state='a'
                    self.nextToken = nextchar
                    break
                elif nextchar in self.quotes:
                    # Relation finished; buffer the opening quote.
                    self.state=nextchar
                    self.nextToken = nextchar
                    break
                else:
                    self.state = ' '
                    break
            elif self.state in self.quotes:
                self.token = self.token + nextchar
                # Allow escaped quotes
                if nextchar == self.state and self.token[-2] != '\\':
                    self.state = ' '
                    break
                elif not nextchar: # end of file
                    if self.debug >= 2:
                        print "shlex: I see EOF in quotes state"
                    # Override SHLEX's ValueError to throw diagnostic
                    diag = Diagnostic14()
                    diag.details = self.token[:-1]
                    raise diag
            elif self.state == 'a':
                if not nextchar:
                    self.state = None # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in word state"
                    self.state = ' '
                    if self.token:
                        break # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars or nextchar in self.quotes:
                    self.token = self.token + nextchar
                elif nextchar in ['>', '<']:
                    # Word ends at a relation start; buffer it for next read.
                    self.nextToken = nextchar
                    self.state = '<'
                    break
                else:
                    # Any other punctuation ends the word; push it back.
                    self.pushback = [nextchar] + self.pushback
                    if self.debug >= 2:
                        print "shlex: I see punctuation in word state"
                    self.state = ' '
                    if self.token:
                        break # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        if self.debug > 1:
            if result:
                print "shlex: raw token=" + `result`
            else:
                print "shlex: raw token=EOF"
        return result
class CQLParser:
    "Token parser to create object structure for CQL"
    # Underlying CQLshlex lexer plus a one-token look-ahead window.
    parser = ""
    currentToken = ""
    nextToken = ""

    def __init__(self, p):
        """ Initialise with shlex parser """
        self.parser = p
        # Prime the two-token window.
        self.fetch_token() # Fetches to next
        self.fetch_token() # Fetches to curr

    def is_boolean(self, token):
        "Is the token a boolean"
        token = token.lower()
        return token in booleans

    def fetch_token(self):
        """ Read ahead one token """
        tok = self.parser.get_token()
        self.currentToken = self.nextToken
        self.nextToken = tok

    def prefixes(self):
        "Create prefixes dictionary"
        # Consumes any leading '>name="uri"' / '>"uri"' maps.
        prefs = {}
        while (self.currentToken == ">") :
            # Strip off maps
            self.fetch_token()
            if self.nextToken == "=":
                # Named map
                name = self.currentToken
                self.fetch_token() # = is current
                self.fetch_token() # id is current
                identifier = self.currentToken
                self.fetch_token()
            else:
                # Anonymous (default) map: >"identifier"
                name = ""
                identifier = self.currentToken
                self.fetch_token()
            if (errorOnDuplicatePrefix and prefs.has_key(name)):
                # Error condition
                diag = Diagnostic45()
                diag.details = name
                raise diag;
            # Strip surrounding quotes from the identifier.
            if len(identifier) > 1 and identifier[0] == '"' and identifier[-1] == '"':
                identifier = identifier[1:-1]
            prefs[name.lower()] = identifier
        return prefs

    def query(self):
        """ Parse query """
        prefs = self.prefixes()
        left = self.subQuery()
        while 1:
            if not self.currentToken:
                break;
            bool = self.is_boolean(self.currentToken)
            if bool:
                boolobject = self.boolean()
                right = self.subQuery()
                # Setup Left Object
                # Left-associative fold: wrap what we have so far in a triple.
                trip = tripleType()
                trip.leftOperand = left
                trip.boolean = boolobject
                trip.rightOperand = right
                left.parent = trip
                right.parent = trip
                boolobject.parent = trip
                left = trip
            else:
                break;
        # Attach any prefix maps to the root of the (sub)tree.
        for p in prefs.keys():
            left.addPrefix(p, prefs[p])
        return left

    def subQuery(self):
        """ Find either query or clause """
        if self.currentToken == "(":
            self.fetch_token() # Skip (
            object = self.query()
            if self.currentToken == ")":
                self.fetch_token() # Skip )
            else:
                # Unbalanced parentheses.
                diag = Diagnostic13()
                diag.details = self.currentToken
                raise diag
        else:
            prefs = self.prefixes()
            if (prefs):
                object = self.query()
                for p in prefs.keys():
                    object.addPrefix(p, prefs[p])
            else:
                object = self.clause()
        return object

    def clause(self):
        """ Find searchClause """
        # Decide between 'index relation term', a bare term (server choice),
        # or a prefix map followed by a clause.
        bool = self.is_boolean(self.nextToken)
        if not bool and not (self.nextToken in [')', '(', '']):
            # Full clause: index relation term.
            index = indexType(self.currentToken)
            self.fetch_token() # Skip Index
            rel = self.relation()
            if (self.currentToken == ''):
                diag = Diagnostic10()
                diag.details = "Expected Term, got end of query."
                raise(diag)
            term = termType(self.currentToken)
            self.fetch_token() # Skip Term
            irt = searchClauseType(index, rel, term)
        elif self.currentToken and (bool or self.nextToken in [')', '']):
            # Bare term: use the server-choice index and relation.
            irt = searchClauseType(indexType(serverChoiceIndex), relationType(serverChoiceRelation), termType(self.currentToken))
            self.fetch_token()
        elif self.currentToken == ">":
            prefs = self.prefixes()
            # iterate to get object
            object = self.clause()
            for p in prefs.keys():
                object.addPrefix(p, prefs[p]);
            return object
        else:
            diag = Diagnostic10()
            diag.details = "Expected Boolean or Relation but got: " + self.currentToken
            raise diag
        return irt

    def modifiers(self):
        # Parse zero or more '/name[comp value]' modifier clauses.
        mods = []
        while (self.currentToken == modifierSeparator):
            self.fetch_token()
            mod = self.currentToken
            mod = mod.lower()
            if (mod == modifierSeparator):
                diag = Diagnostic20()
                diag.details = "Null modifier"
                raise diag
            self.fetch_token()
            comp = self.currentToken
            if (comp in order):
                # Modifier carries a comparison and value, e.g. /distance<3
                self.fetch_token()
                value = self.currentToken
                self.fetch_token()
            else:
                comp = ""
                value = ""
            mods.append(ModifierClause(mod, comp, value))
        return mods

    def boolean(self):
        """ Find boolean """
        self.currentToken = self.currentToken.lower()
        if self.currentToken in booleans:
            bool = booleanType(self.currentToken)
            self.fetch_token()
            bool.modifiers = self.modifiers()
            for b in bool.modifiers:
                b.parent = bool
        else:
            diag = Diagnostic37()
            diag.details = self.currentToken
            raise diag
        return bool

    def relation(self):
        """ Find relation """
        self.currentToken = self.currentToken.lower()
        rel = relationType(self.currentToken)
        self.fetch_token()
        rel.modifiers = self.modifiers()
        for r in rel.modifiers:
            r.parent = rel
        return rel
class XCQLParser:
    """ Parser for XCQL using some very simple DOM """

    def firstChildElement(self, elem):
        """ Find first child which is an Element """
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                return c
        return None

    def firstChildData(self,elem):
        """ Find first child which is Data """
        for c in elem.childNodes:
            if c.nodeType == Node.TEXT_NODE:
                return c
        return None

    def searchClause(self, elem):
        """ Process a <searchClause> """
        sc = searchClauseType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "index":
                    sc.index = indexType(self.firstChildData(c).data.lower())
                elif c.localName == "term":
                    sc.term = termType(self.firstChildData(c).data)
                elif c.localName == "relation":
                    sc.relation = self.relation(c)
                elif c.localName == "prefixes":
                    sc.prefixes = self.prefixes(c)
                else:
                    # NOTE(review): Python 2 raises the first element of a
                    # raised tuple, so this surfaces as a bare ValueError.
                    raise(ValueError, c.localName)
        return sc

    def triple(self, elem):
        """ Process a <triple> """
        trip = tripleType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "boolean":
                    trip.boolean = self.boolean(c)
                elif c.localName == "prefixes":
                    trip.prefixes = self.prefixes(c)
                elif c.localName == "leftOperand":
                    c2 = self.firstChildElement(c)
                    if c2.localName == "searchClause":
                        trip.leftOperand = self.searchClause(c2)
                    else:
                        trip.leftOperand = self.triple(c2)
                else:
                    # Any other element is assumed to be <rightOperand>.
                    c2 = self.firstChildElement(c)
                    if c2.localName == "searchClause":
                        trip.rightOperand = self.searchClause(c2)
                    else:
                        trip.rightOperand = self.triple(c2)
        return trip

    def relation(self, elem):
        """ Process a <relation> """
        rel = relationType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "value":
                    rel.value = c.firstChild.data.lower()
                elif c.localName == "modifiers":
                    mods = []
                    for c2 in c.childNodes:
                        if c2.nodeType == Node.ELEMENT_NODE:
                            if c2.localName == "modifier":
                                for c3 in c2.childNodes:
                                    if c3.localName == "value":
                                        # NOTE(review): reads c2's first text
                                        # node, not c3's -- confirm intended.
                                        val = self.firstChildData(c2).data.lower()
                                        mods.append(val)
                    rel.modifiers = mods
        return rel

    def boolean(self, elem):
        "Process a <boolean>"
        bool = booleanType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "value":
                    bool.value = self.firstChildData(c).data.lower()
                else:
                    # Can be in any order, so we need to extract, then order
                    mods = {}
                    for c2 in c.childNodes:
                        if c2.nodeType == Node.ELEMENT_NODE:
                            if c2.localName == "modifier":
                                type = ""
                                value = ""
                                for c3 in c2.childNodes:
                                    if c3.nodeType == Node.ELEMENT_NODE:
                                        if c3.localName == "value":
                                            value = self.firstChildData(c3).data.lower()
                                        elif c3.localName == "type":
                                            type = self.firstChildData(c3).data
                                mods[type] = value
                    modlist = []
                    # NOTE(review): 'booleanModifierTypes' is not defined
                    # anywhere in this module -- this branch would raise
                    # NameError if ever reached. TODO confirm origin.
                    for t in booleanModifierTypes[1:]:
                        if mods.has_key(t):
                            modlist.append(mods[t])
                        else:
                            modlist.append('')
                    bool.modifiers = modlist
        return bool

    def prefixes(self, elem):
        "Process <prefixes>"
        prefs = {}
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                # prefix
                name = ""
                identifier = ""
                for c2 in c.childNodes:
                    if c2.nodeType == Node.ELEMENT_NODE:
                        if c2.localName == "name":
                            name = self.firstChildData(c2).data.lower()
                        elif c2.localName == "identifier":
                            identifier = self.firstChildData(c2).data
                prefs[name] = identifier
        return prefs
def xmlparse(s):
    """ API. Return a searchClause/triple object from an XML string """
    document = parseString(s)
    return xcqlparse(document.firstChild)
def xcqlparse(query):
    """ API. Return a searchClause/triple object from XML DOM objects"""
    # Only node properties are read, so any DOM implementation will do.
    walker = XCQLParser()
    if query.localName == "searchClause":
        return walker.searchClause(query)
    return walker.triple(query)
def parse(query):
    """ API. Return a searchClause/triple object from CQL string"""
    try:
        # Normalise to a UTF-8 byte string for the shlex-based lexer
        # (Python 2: unicode is encoded; a plain str round-trips via an
        # implicit ascii decode, which is what can raise here).
        query = query.encode("utf-8")
    except:
        diag = Diagnostic10()
        diag.details = "Cannot parse non utf-8 characters"
        raise diag
    q = StringIO(query)
    lexer = CQLshlex(q)
    parser = CQLParser(lexer)
    object = parser.query()
    # Anything left over means the query was not fully consumed.
    if parser.currentToken != '':
        diag = Diagnostic10()
        diag.details = "Unprocessed tokens remain: " + repr(parser.currentToken)
        raise diag
    else:
        del lexer
        del parser
        del q
        return object
# Assign our objects to generate
# Factory aliases: the parser builds all nodes through these names so that
# subclasses (such as the C* classes from CQLUtils) can be swapped in.
tripleType = Triple
booleanType = Boolean
relationType = Relation
searchClauseType = SearchClause
modifierClauseType = ModifierClause
modifierTypeType = ModifierType
indexType = Index
termType = Term

try:
    # Prefer the Z39.50-aware subclasses when CQLUtils is importable.
    from CQLUtils import *
    tripleType = CTriple
    booleanType = CBoolean
    relationType = CRelation
    searchClauseType = CSearchClause
    modifierClauseType = CModifierClause
    modifierTypeType = CModifierType
    indexType = CIndex
    termType = CTerm
except:
    # Nested scopes. Utils needs our classes to parent
    # We need its classes to build (maybe)
    # The bare except deliberately swallows the circular-import failure
    # and falls back to the plain CQL classes above.
    pass
if (__name__ == "__main__"):
    # Read one line of CQL from stdin; print the XCQL tree, or the
    # diagnostic's full details on failure.
    import sys;
    s = sys.stdin.readline()
    try:
        q = parse(s);
    except SRWDiagnostic, diag:
        # Print a full version, not just str()
        print "Diagnostic Generated."
        print " Code: " + str(diag.code)
        print " Details: " + str(diag.details)
        print " Message: " + str(diag.message)
    else:
        print q.toXCQL()[:-1];

View File

@ -1,544 +0,0 @@
"""CQL utility functions and subclasses"""
from CQLParser import *
from types import ListType, IntType
from SRWDiagnostics import *
from PyZ3950 import z3950, asn1, oids
from PyZ3950.zdefs import make_attr
asn1.register_oid (oids.Z3950_QUERY_CQL, asn1.GeneralString)
class ZCQLConfig:
    """Mapping tables between CQL context sets/indexes and Z39.50
    attribute values, plus the reverse mapping in attrsToCql()."""

    # Context set short name -> identifier URI.
    # NOTE(review): the 'ccg' URI has a trailing space -- confirm intended.
    contextSets = {'dc' : 'info:srw/cql-context-set/1/dc-v1.1',
                   'cql' : 'info:srw/cql-context-set/1/cql-v1.1',
                   'bath' : 'http://zing.z3950.org/cql/bath/2.0/',
                   'zthes' : 'http://zthes.z3950.org/cql/1.0/',
                   'ccg' : 'http://srw.cheshire3.org/contextSets/ccg/1.1/ ',
                   'rec' : 'info:srw/cql-context-set/2/rec-1.0',
                   'net' : 'info:srw/cql-context-set/2/net-1.0'}

    # Dublin Core index name -> bib-1 use attribute number.
    dc = {'title' : 4,
          'subject' : 21,
          'creator' : 1003,
          'author' : 1003,
          'editor' : 1020,
          'contributor' : 1018,
          'publisher' : 1018,
          'description' : 62,
          'date' : 30,
          'resourceType' : 1031,
          'type' : 1031,
          'format' : 1034,
          'identifier' : 12,
          'source' : 1019,
          'language' : 54,
          'relation' : 1016,
          'coverage' : 1016,
          'rights' : 1016
          }

    # CQL context set indexes -> bib-1 use attribute number.
    cql = {'anywhere' : 1016,
           'serverChoice' : 1016}

    # The common bib1 points
    # NOTE(review): "lccn" appears twice (9 and 16); in a dict literal the
    # later value (16) wins, so use attribute 9 is unreachable by name.
    bib1 = {"personal_name" : 1,
            "corporate_name" : 2,
            "conference_name" : 3,
            "title" : 4,
            "title_series" : 5,
            "title_uniform" : 6,
            "isbn" : 7,
            "issn" : 8,
            "lccn" : 9,
            "local_number" : 12,
            "dewey_number" : 13,
            "lccn" : 16,
            "local_classification" : 20,
            "subject" : 21,
            "subject_lc" : 27,
            "subject_local" : 29,
            "date" : 30,
            "date_publication" : 31,
            "date_acquisition" : 32,
            "local_call_number" : 53,
            "abstract" : 62,
            "note" : 63,
            "record_type" : 1001,
            "name" : 1002,
            "author" : 1003,
            "author_personal" : 1004,
            "identifier" : 1007,
            "text_body" : 1010,
            "date_modified" : 1012,
            "date_added" : 1011,
            "concept_text" : 1014,
            "any" : 1016,
            "default" : 1017,
            "publisher" : 1018,
            "record_source" : 1019,
            "editor" : 1020,
            "docid" : 1032,
            "anywhere" : 1035,
            "sici" : 1037
            }

    # exp-1 (explain) attribute set use values.
    exp1 = {"explainCategory" :1,
            "humanStringLanguage" : 2,
            "databaseName" : 3,
            "serverName" : 4,
            "attributeSetOID" : 5,
            "recordSyntaxOID" : 6,
            "tagSetOID" : 7,
            "extendedServiceOID" : 8,
            "dateAdded" : 9,
            "dateChanged" : 10,
            "dateExpires" : 11,
            "elementSetName" : 12,
            "processingContext" : 13,
            "processingName" : 14,
            "termListName" : 15,
            "schemaOID" : 16,
            "producer" : 17,
            "supplier" : 18,
            "availability" : 19,
            "proprietary" : 20,
            "userFee" : 21,
            "variantSetOID" : 22,
            "unitSystem" : 23,
            "keyword" : 24,
            "explainDatabase" : 25,
            "processingOID" : 26
            }

    # xd-1 (cross-domain) attribute set use values.
    # NOTE(review): key 'langauge' is a typo for 'language' -- callers using
    # the misspelling depend on it, so it is left unchanged here.
    xd1 = {"title" : 1,
           "subject" : 2,
           "name" : 3,
           "description" : 4,
           "date" : 5,
           "type" : 6,
           "format" : 7,
           "identifier" : 8,
           "source" : 9,
           "langauge" : 10,
           "relation" : 11,
           "coverage" : 12,
           "rights" : 13}

    # util attribute set use values.
    util = {"record_date" : 1,
            "record_agent" : 2,
            "record_language" : 3,
            "control_number" : 4,
            "cost" : 5,
            "record_syntax" : 6,
            "database_schema" : 7,
            "score" : 8,
            "rank" : 9,
            "result_set_position" : 10,
            "all" : 11,
            "anywhere" : 12,
            "server_choice" : 13,
            "wildcard" : 14,
            "wildpath" : 15}

    defaultAttrSet = z3950.Z3950_ATTRS_BIB1_ov

    def __init__(self):
        # Aliases matching the attribute-set names used elsewhere.
        self.util1 = self.util
        self.xd = self.xd1

    def attrsToCql(self, attrs):
        """Map a list of [attrSet, attrType, attrValue] triples to a CQL
        (index, relation) pair, defaulting missing sets to bib-1."""
        hash = {}
        for c in attrs:
            if (not c[0]):
                c[0] = self.defaultAttrSet
            hash[(c[0], c[1])] = c[2]
        bib1 = z3950.Z3950_ATTRS_BIB1_ov
        # bib-1 attribute types: 1 use, 2 relation, 3 position, 4 structure,
        # 5 truncation, 6 completeness.
        use = hash.get((bib1, 1), 4)
        rel = hash.get((bib1, 2), 3)
        posn = hash.get((bib1, 3), None)
        struct = hash.get((bib1, 4), None)
        trunc = hash.get((bib1, 5), None)
        comp = hash.get((bib1, 6), None)
        index = None
        if (not isinstance(use, int)):
            # Non-numeric use attribute is already an index name.
            index = indexType(use)
        else:
            # Prefer a dc.* name, then bib1.*, else fall back to the number.
            for v in self.dc.items():
                if use == v[1]:
                    index = indexType("dc.%s" % (v[0]))
                    break
            if not index:
                for v in self.bib1.items():
                    if (use == v[1]):
                        index = indexType("bib1.%s" % (v[0]))
                        break
            if not index:
                index = indexType("bib1.%i" % (use))
        relations = ['', '<', '<=', '=', '>=', '>', '<>']
        if (comp == 3):
            # Completeness 'complete field' maps to exact.
            relation = relationType("exact")
        elif (rel > 6):
            # Relation beyond the comparison range: word/phrase searching.
            if struct in [2, 6]:
                relation = relationType('any')
            else:
                relation = relationType('=')
        else:
            relation = relationType(relations[rel])

        if (rel == 100):
            relation.modifiers.append(modifierClauseType('phonetic'))
        elif (rel == 101):
            relation.modifiers.append(modifierClauseType('stem'))
        elif (rel == 102):
            relation.modifiers.append(modifierClauseType('relevant'))

        if (struct in [2, 6]):
            relation.modifiers.append(modifierClauseType('word'))
        elif (struct in [4, 5, 100]):
            relation.modifiers.append(modifierClauseType('date'))
        elif (struct == 109):
            relation.modifiers.append(modifierClauseType('number'))
        elif (struct in [1, 108]):
            relation.modifiers.append(modifierClauseType('string'))
        elif (struct == 104):
            relation.modifiers.append(modifierClauseType('uri'))

        return (index, relation)
# Shared default configuration instance for rpn2cql().
zConfig = ZCQLConfig()

def rpn2cql(rpn, config=zConfig, attrSet=None):
    """Convert a Z39.50 RPN query structure into a CQL parse tree.

    rpn is a tagged tuple: ('op', ...) for a single clause,
    ('rpnRpnOp', ...) for a boolean triple, ('type_1', query) for a
    whole Type-1 query. Returns a searchClauseType/tripleType node.
    """
    if rpn[0] == 'op':
        # single search clause
        op = rpn[1]
        type = op[0]
        if type == 'attrTerm':
            attrs = op[1].attributes
            term = op[1].term
            combs = []
            for comb in attrs:
                if hasattr(comb, 'attributeSet'):
                    attrSet = comb.attributeSet
                if hasattr(comb, 'attributeType'):
                    aType = comb.attributeType
                else:
                    # Broken!
                    aType = 1
                vstruct = comb.attributeValue
                if (vstruct[0] == 'numeric'):
                    aValue = vstruct[1]
                else:
                    # Complex attr value
                    vstruct = vstruct[1]
                    if (hasattr(vstruct, 'list')):
                        aValue = vstruct.list[0][1]
                    else:
                        # semanticAction?
                        aValue = vstruct.semanticAction[0][1]
                combs.append([attrSet, aType, aValue])
            # Now let config do its thing
            (index, relation) = config.attrsToCql(combs)
            return searchClauseType(index, relation, termType(term[1]))
        elif type == 'resultSet':
            # BUG FIX: the result-set id is op[1]; op[0] is the tag
            # 'resultSet' itself (see CSearchClause.toRPN).
            return searchClauseType(indexType('cql.resultSetId'), relationType('='), termType(op[1]))
    elif rpn[0] == 'rpnRpnOp':
        triple = rpn[1]
        bool = triple.op
        lhs = triple.rpn1
        rhs = triple.rpn2
        ctrip = tripleType()
        # BUG FIX: was 'ctrip.leftOperation', which left leftOperand unset.
        ctrip.leftOperand = rpn2cql(lhs, config)
        ctrip.rightOperand = rpn2cql(rhs, config)
        ctrip.boolean = booleanType(bool[0])
        if bool[0] == 'prox':
            # Rebuild the CQL prox modifiers from the proximity operator.
            distance = bool[1].distance
            order = bool[1].ordered
            if order:
                order = "ordered"
            else:
                order = "unordered"
            relation = bool[1].relationType
            rels = ["", "<", "<=", "=", ">=", ">", "<>"]
            relation = rels[relation]
            unit = bool[1].proximityUnitCode
            units = ["", "character", "word", "sentence", "paragraph", "section", "chapter", "document", "element", "subelement", "elementType", "byte"]
            if unit[0] == "known":
                unit = units[unit[1]]
            # BUG FIX: these were qualified as 'cql.modifierClauseType', but
            # no module named 'cql' is in scope; the factory alias is a
            # module-level name here.
            mods = [modifierClauseType('distance', relation, str(distance)), modifierClauseType('word', '=', unit), modifierClauseType(order)]
            ctrip.boolean.modifiers = mods
        return ctrip
    elif rpn[0] == 'type_1':
        q = rpn[1]
        return rpn2cql(q.rpn, config, q.attributeSet)
class CSearchClause(SearchClause):

    def convertMetachars(self, t):
        """Convert SRW meta characters in to Cheshire's meta characters.

        Raises Diagnostic28/31 for unsupported ?, ^ or non-trailing *.
        BUG FIX: the original called the bare names count()/replace()
        (old string-module style, never imported here) and assigned into
        an immutable string (t[-1] = "#"), so every call raised.
        """
        # Fail on ?, ^ or * not at the end.
        if (t.count("?") != t.count("\\?")):
            diag = Diagnostic28()
            diag.details = "? Unsupported"
            raise diag
        elif (t.count("^") != t.count("\\^")):
            diag = Diagnostic31()
            diag.details = "^ Unsupported"
            raise diag
        elif (t.count("*") != t.count("\\*")):
            if t[-1] != "*" or t[-2] == "\\":
                diag = Diagnostic28()
                diag.details = "Non trailing * unsupported"
                raise diag
            else:
                # Trailing * becomes Cheshire's '#' truncation marker.
                t = t[:-1] + "#"
        t = t.replace("\\^", "^")
        t = t.replace("\\?", "?")
        t = t.replace("\\*", "*")
        return t

    def toRPN(self, top=None):
        """Serialise this clause to a z3950 'op' tuple, expanding any/all
        relations into an or/and tree of word clauses first."""
        if not top:
            top = self
        if (self.relation.value in ['any', 'all']):
            # Need to split this into and/or tree
            if (self.relation.value == 'any'):
                bool = " or "
            else:
                bool = " and "
            words = self.term.value.split()
            self.relation.value = '='
            # Add 'word' relationModifier
            self.relation.modifiers.append(CModifierClause('cql.word'))
            # Create CQL, parse it, walk new tree
            idxrel = "%s %s" % (self.index.toCQL(), self.relation.toCQL())
            text = []
            for w in words:
                text.append('%s "%s"' % (idxrel, w))
            cql = bool.join(text)
            tree = parse(cql)
            tree.prefixes = self.prefixes
            tree.parent = self.parent
            tree.config = self.config
            return tree.toRPN(top)
        else:
            # attributes, term
            # AttributeElement: attributeType, attributeValue
            # attributeValue ('numeric', n) or ('complex', struct)
            if (self.index.value == 'resultsetid'):
                return ('op', ('resultSet', self.term.value))
            clause = z3950.AttributesPlusTerm()
            attrs = self.index.toRPN(top)
            if (self.term.value.isdigit()):
                self.relation.modifiers.append(CModifierClause('cql.number'))
            relattrs = self.relation.toRPN(top)
            attrs.update(relattrs)
            butes =[]
            for e in attrs.iteritems():
                butes.append((e[0][0], e[0][1], e[1]))
            clause.attributes = [make_attr(*e) for e in butes]
            clause.term = self.term.toRPN(top)
            return ('op', ('attrTerm', clause))
class CBoolean(Boolean):

    def toRPN(self, top):
        """Serialise this boolean to a z3950 RPN operator tuple."""
        op = self.value
        if (self.value == 'not'):
            # CQL 'not' is Z39.50 'and-not'.
            op = 'and-not'
        elif (self.value == 'prox'):
            # Create ProximityOperator
            prox = z3950.ProximityOperator()
            # distance, ordered, proximityUnitCode, relationType
            u = self['unit']
            try:
                units = ["", "character", "word", "sentence", "paragraph", "section", "chapter", "document", "element", "subelement", "elementType", "byte"]
                if (u.value in units):
                    prox.unit = ('known', units.index(u.value))
                else:
                    # Uhhhh.....
                    prox.unit = ('private', int(u.value))
            except:
                # Missing/bad unit modifier: default to word proximity.
                prox.unit = ('known', 2)
            d = self['distance']
            try:
                prox.distance = int(d.value)
            except:
                # Default: adjacent words, or distance 0 for other units.
                if (prox.unit == ('known', 2)):
                    prox.distance = 1
                else:
                    prox.distance = 0
            try:
                rels = ["", "<", "<=", "=", ">=", ">", "<>"]
                # The comparison slot of the 'distance' modifier carries the
                # relation (see how prox modifiers are built in rpn2cql).
                prox.relationType = rels.index(d.comparison)
            except:
                prox.relationType = 2
            prox.ordered = bool(self['ordered'])
            return ('op', ('prox', prox))
        return (op, None)
class CTriple(Triple):
    def toRPN(self, top=None):
        """Convert this boolean triple into an ('rpnRpnOp', node) pair.

        *top* is the query root used for configuration lookups; when
        omitted, this triple acts as its own root.
        """
        if not top:
            top = self
        node = z3950.RpnRpnOp()
        node.rpn1 = self.leftOperand.toRPN(top)
        node.rpn2 = self.rightOperand.toRPN(top)
        node.op = self.boolean.toRPN(top)
        return ('rpnRpnOp', node)
class CIndex(Index):
    def toRPN(self, top):
        """Resolve this index into a {(attrSet, attrType): value} dict.

        Resolution order: SRW server configuration (top.config) if
        present, then attributes on the module-level zConfig, then the
        raw Z39.50 attribute-set tables in oids.
        Raises Diagnostic15/Diagnostic16 for unknown set/index, or
        ValueError when the prefix cannot be resolved at all.
        """
        self.resolvePrefix()
        pf = self.prefix
        if (not pf and self.prefixURI):
            # We have a default
            # Reverse-map the URI back to its short prefix via zConfig.
            for k in zConfig.contextSets:
                if zConfig.contextSets[k] == self.prefixURI:
                    pf = k
                    break
        # Default BIB1
        # NOTE(review): 'set' and 'type' below shadow builtins; kept as-is.
        set = oids.oids['Z3950']['ATTRS']['BIB1']['oid']
        if (hasattr(top, 'config') and top.config):
            config = top.config
            # Check SRW Configuration
            cql = config.contextSetNamespaces['cql']
            index = self.value
            if self.prefixURI == cql and self.value == "serverchoice":
                # Have to resolve our prefixes etc, so create an index object to do it
                index = config.defaultIndex
                cidx = CIndex(index)
                cidx.config = config
                cidx.parent = config
                cidx.resolvePrefix()
                pf = cidx.prefix
                index = cidx.value
            if config.indexHash.has_key(pf):
                if config.indexHash[pf].has_key(index):
                    idx = config.indexHash[pf][index]
                    # Need to map from this list to RPN list
                    # Each entry looks like (oid-string, type, value) --
                    # TODO confirm against the config file format.
                    attrs = {}
                    for i in idx:
                        set = asn1.OidVal(map(int, i[0].split('.')))
                        type = int(i[1])
                        if (i[2].isdigit()):
                            val = int(i[2])
                        else:
                            val = i[2]
                        attrs[(set, type)] = val
                    return attrs
                else:
                    diag = Diagnostic16()
                    diag.details = index
                    diag.message = "Unknown index"
                    raise diag
            else:
                diag = Diagnostic15()
                diag.details = pf
                diag.message = "Unknown context set"
                raise diag
        elif (hasattr(zConfig, pf)):
            # Per-prefix mapping dict hung directly off zConfig.
            mp = getattr(zConfig, pf)
            if (mp.has_key(self.value)):
                val = mp[self.value]
            else:
                val = self.value
        elif (oids.oids['Z3950']['ATTRS'].has_key(pf.upper())):
            set = oids.oids['Z3950']['ATTRS'][pf.upper()]['oid']
            if (self.value.isdigit()):
                # bib1.1018
                val = int(self.value)
            else:
                # complex attribute for bib1
                val = self.value
        else:
            print "Can't resolve %s" % pf
            raise(ValueError)
        # Fallback paths produce a single Use (type 1) attribute.
        return {(set, 1) : val}
class CRelation(Relation):
    def toRPN(self, top):
        """Map this relation plus its modifiers onto BIB-1 attributes.

        Returns {(BIB1-oid, attrType): value} covering attribute types
        1-6; vals[n] below corresponds to attribute type n (index 0
        unused), and later assignments deliberately override earlier
        ones (modifiers beat the base relation).
        """
        rels = ['', '<', '<=', '=', '>=', '>', '<>']
        set = z3950.Z3950_ATTRS_BIB1_ov
        vals = [None, None, None, None, None, None, None]
        if self.value in rels:
            # Position in rels is the BIB-1 relation (type 2) code.
            vals[2] = rels.index(self.value)
        elif self.value in ['exact', 'scr']:
            # 'exact' and server-choice both use relation 3 (equals).
            vals[2] = 3
        elif (self.value == 'within'):
            vals[2] = 104
        if self['relevant']:
            vals[2] = 102
        elif self['stem']:
            vals[2] = 101
        elif self['phonetic']:
            vals[2] = 100
        if self['number']:
            # numeric string (structure 109) + whole field (completeness 100)
            vals[4] = 109
            vals[5] = 100
        elif self['date']:
            vals[4] = 5
        elif self['word']:
            vals[4] = 2
        if self.value == 'exact':
            # position 1 (first in field) + completeness 100
            vals[3] = 1
            vals[5] = 100
            # vals[6] = 3
        else:
            vals[3] = 3
            # vals[6] = 1
        attrs = {}
        for x in range(1,7):
            if vals[x]:
                attrs[(z3950.Z3950_ATTRS_BIB1_ov, x)] = vals[x]
        return attrs
class CTerm(Term):
    def toRPN(self, top):
        """Search terms are always transferred as Z39.50 'general' strings."""
        return ('general', self.value)
class CModifierClause(ModifierClause):
    # No Cheshire-specific behaviour; exists so the C* node family is
    # complete and modifiers built here are of the local type.
    pass
class CModifierType(ModifierType):
    # No Cheshire-specific behaviour; placeholder subclass.
    pass

View File

@ -1,40 +0,0 @@
# PyZ3950_parsetab.py
# This file is automatically generated. Do not edit.
_lr_method = 'SLR'
_lr_signature = '\xfc\xb2\xa8\xb7\xd9\xe7\xad\xba"\xb2Ss\'\xcd\x08\x16'
_lr_action_items = {'QUOTEDVALUE':([5,26,0,19,16,],[1,1,1,1,1,]),'LOGOP':([3,25,4,14,9,6,27,23,13,20,22,1,],[-5,-9,-14,-13,16,-8,16,-7,16,-6,-4,-12,]),'SET':([0,16,5,26,],[11,11,11,11,]),'RPAREN':([27,23,3,22,1,25,13,4,20,6,14,],[28,-7,-5,-4,-12,-9,20,-14,-6,-8,-13,]),'$':([8,14,2,23,3,20,28,25,9,1,4,6,22,],[0,-13,-1,-7,-5,-6,-3,-9,-2,-12,-14,-8,-4,]),'SLASH':([21,],[26,]),'ATTRSET':([0,],[7,]),'QUAL':([0,26,16,18,5,],[10,10,10,24,10,]),'COMMA':([10,12,24,],[-10,18,-11,]),'LPAREN':([26,0,16,7,5,],[5,5,5,15,5,]),'WORD':([19,17,14,0,5,26,6,16,15,1,4,25,],[4,23,-13,4,4,4,14,4,21,-12,-14,14,]),'RELOP':([11,24,10,12,],[17,-11,-10,19,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
_lr_action[(_x,_k)] = _y
del _lr_action_items
_lr_goto_items = {'cclfind_or_attrset':([0,],[2,]),'elements':([5,26,16,0,],[3,3,22,3,]),'quallist':([5,26,0,16,],[12,12,12,12,]),'val':([5,16,26,19,0,],[6,6,6,25,6,]),'top':([0,],[8,]),'cclfind':([5,0,26,],[13,9,27,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
_lr_goto[(_x,_k)] = _y
del _lr_goto_items
_lr_productions = [
("S'",1,None,None,None),
('top',1,'p_top','./ccl.py',154),
('cclfind_or_attrset',1,'p_cclfind_or_attrset_1','./ccl.py',158),
('cclfind_or_attrset',6,'p_cclfind_or_attrset_2','./ccl.py',162),
('cclfind',3,'p_ccl_find_1','./ccl.py',166),
('cclfind',1,'p_ccl_find_2','./ccl.py',170),
('elements',3,'p_elements_1','./ccl.py',174),
('elements',3,'p_elements_2','./ccl.py',196),
('elements',1,'p_elements_3','./ccl.py',202),
('elements',3,'p_elements_4','./ccl.py',206),
('quallist',1,'p_quallist_1','./ccl.py',213),
('quallist',3,'p_quallist_2','./ccl.py',217),
('val',1,'p_val_1','./ccl.py',221),
('val',2,'p_val_2','./ccl.py',225),
('val',1,'p_val_3','./ccl.py',229),
]

View File

@ -1,451 +0,0 @@
# Base Class
class SRWDiagnostic (Exception):
    """Root of the SRW diagnostic exception hierarchy.

    Subclasses override ``code`` and ``message``; on construction the
    numeric code is appended to ``uri`` so each instance carries the
    full diagnostic URI.  ``details`` may be filled in by the raiser.
    """
    code = 0
    uri = "info:srw/diagnostic/1/"
    details = ""
    message = ""
    surrogate = 0
    fatal = 1

    # NB the unused 'name' argument is required for serialization in SRW.
    def __init__(self, name=None):
        if self.code:
            self.uri = self.uri + str(self.code)
        Exception.__init__(self)

    def __str__(self):
        parts = (self.uri, self.message, self.details)
        return "%s [%s]: %s" % parts
# Diagnostic Types
# Empty subclasses grouping the numbered diagnostics below by category.
class GeneralDiagnostic (SRWDiagnostic):
    pass
class CQLDiagnostic (SRWDiagnostic):
    pass
class RecordDiagnostic (SRWDiagnostic):
    pass
class ResultSetDiagnostic (SRWDiagnostic):
    pass
class SortDiagnostic (SRWDiagnostic):
    pass
class StyleDiagnostic (SRWDiagnostic):
    pass
class ScanDiagnostic (SRWDiagnostic):
    pass
class DeprecatedDiagnostic(SRWDiagnostic):
    # Warns on stderr-less stdout whenever a withdrawn diagnostic is raised.
    def __init__(self, name=None):
        # Python 2 print statement; this module predates Python 3.
        print "WARNING: Use of deprecated diagnostic %s" % (self)
        SRWDiagnostic.__init__(self)
class ExplainDiagnostic (DeprecatedDiagnostic):
    # Explain-related diagnostics were withdrawn from the SRW spec.
    pass
# Rob's (empty) diagnostic set
class RobDiagnostic (SRWDiagnostic):
    # Alternative diagnostic namespace (info:srw/diagnostic/2/); unused.
    uri = "info:srw/diagnostic/2/"
# Individual Diagnostics
class Diagnostic1 (GeneralDiagnostic):
code = 1
message = "General system error"
class Diagnostic2 (GeneralDiagnostic):
code = 2
message = "System temporarily unavailable"
class Diagnostic3 (GeneralDiagnostic):
code = 3
message = "Authentication error"
class Diagnostic4 (GeneralDiagnostic):
code = 4
message = "Unsupported operation"
class Diagnostic5 (GeneralDiagnostic):
code = 5
message = "Unsupported version"
class Diagnostic6 (GeneralDiagnostic):
code = 6
message = "Unsupported parameter value"
class Diagnostic7 (GeneralDiagnostic):
code = 7
message = "Mandatory parameter not supplied"
class Diagnostic8 (GeneralDiagnostic):
code = 8
message = "Unknown parameter"
class Diagnostic10 (CQLDiagnostic):
code = 10
message = "Malformed query"
class Diagnostic13 (CQLDiagnostic):
code = 13
message = "Unsupported use of parentheses"
class Diagnostic14 (CQLDiagnostic):
code = 14
message = "Unsupported use of quotes"
class Diagnostic15 (CQLDiagnostic):
code = 15
message = "Unsupported context set"
class Diagnostic16 (CQLDiagnostic):
code = 16
message = "Unsupported index"
class Diagnostic18 (CQLDiagnostic):
code = 18
message = "Unsupported combination of indexes"
class Diagnostic19 (CQLDiagnostic):
code = 19
message = "Unsupported relation"
class Diagnostic20 (CQLDiagnostic):
code = 20
message = "Unsupported relation modifier"
class Diagnostic21 (CQLDiagnostic):
code = 21
message = "Unsupported combination of relation modifiers"
class Diagnostic22 (CQLDiagnostic):
code = 22
message = "Unsupported combination of relation and index"
class Diagnostic23 (CQLDiagnostic):
code = 23
message = "Too many characters in term"
class Diagnostic24 (CQLDiagnostic):
code = 24
message = "Unsupported combination of relation and term"
class Diagnostic26 (CQLDiagnostic):
code = 26
message = "Non special character escaped in term"
class Diagnostic27 (CQLDiagnostic):
code = 27
message = "Empty term unsupported"
class Diagnostic28 (CQLDiagnostic):
code = 28
message = "Masking character not supported"
class Diagnostic29 (CQLDiagnostic):
code = 29
message = "Masked words too short"
class Diagnostic30 (CQLDiagnostic):
code = 30
message = "Too many masking characters in term"
class Diagnostic31 (CQLDiagnostic):
code = 31
message = "Anchoring character not supported"
class Diagnostic32 (CQLDiagnostic):
code = 32
message = "Anchoring character in unsupported position."
class Diagnostic33 (CQLDiagnostic):
code = 33
message = "Combination of proximity/adjacency and masking characters not supported"
class Diagnostic34 (CQLDiagnostic):
code = 34
message = "Combination of proximity/adjacency and anchoring characters not supported"
class Diagnostic35 (CQLDiagnostic):
code = 35
message = "Term only stopwords"
class Diagnostic36 (CQLDiagnostic):
code = 36
message = "Term in invalid format for index or relation"
class Diagnostic37 (CQLDiagnostic):
code = 37
message = "Unsupported boolean operator"
class Diagnostic38 (CQLDiagnostic):
code = 38
message = "Too many boolean operators"
class Diagnostic39 (CQLDiagnostic):
code = 39
message = "Proximity not supported"
class Diagnostic40 (CQLDiagnostic):
code = 40
message = "Unsupported proximity relation"
class Diagnostic41 (CQLDiagnostic):
code = 41
message = "Unsupported proximity distance"
class Diagnostic42 (CQLDiagnostic):
code = 42
message = "Unsupported proximity unit"
class Diagnostic43 (CQLDiagnostic):
code = 43
message = "Unsupported proximity ordering"
class Diagnostic44 (CQLDiagnostic):
code = 44
message = "Unsupported combination of proximity modifiers"
class Diagnostic50 (ResultSetDiagnostic):
code = 50
message = "Result sets not supported"
class Diagnostic51 (ResultSetDiagnostic):
code = 51
message = "Result set does not exist"
class Diagnostic52 (ResultSetDiagnostic):
code = 52
message = "Result set temporarily unavailable"
class Diagnostic53 (ResultSetDiagnostic):
code = 53
message = "Result sets only supported for retrieval"
class Diagnostic55 (ResultSetDiagnostic):
code = 55
message = "Combination of result sets with search terms not supported"
class Diagnostic58 (ResultSetDiagnostic):
code = 58
message = "Result set created with unpredictable partial results available"
class Diagnostic59 (ResultSetDiagnostic):
code = 59
message = "Result set created with valid partial results available"
class Diagnostic60 (RecordDiagnostic):
code = 60
message = "Too many records retrieved"
class Diagnostic61 (RecordDiagnostic):
code = 61
message = "First record position out of range"
class Diagnostic64 (RecordDiagnostic):
code = 64
message = "Record temporarily unavailable"
surrogate = 1
class Diagnostic65 (RecordDiagnostic):
code = 65
message = "Record does not exist"
surrogate = 1
class Diagnostic66 (RecordDiagnostic):
code = 66
message = "Unknown schema for retrieval"
class Diagnostic67 (RecordDiagnostic):
code = 67
message = "Record not available in this schema"
surrogate = 1
class Diagnostic68 (RecordDiagnostic):
code = 68
message = "Not authorised to send record"
surrogate = 1
class Diagnostic69 (RecordDiagnostic):
code = 69
message = "Not authorised to send record in this schema"
surrogate = 1
class Diagnostic70 (RecordDiagnostic):
code = 70
message = "Record too large to send"
surrogate = 1
class Diagnostic71 (RecordDiagnostic):
code = 71
message = "Unsupported record packing"
class Diagnostic72 (RecordDiagnostic):
code = 72
message = "XPath retrieval unsupported"
class Diagnostic73 (RecordDiagnostic):
code = 73
message = "XPath expression contains unsupported feature"
class Diagnostic74 (RecordDiagnostic):
code = 74
message = "Unable to evaluate XPath expression"
class Diagnostic80 (SortDiagnostic):
code = 80
message = "Sort not supported"
class Diagnostic82 (SortDiagnostic):
code = 82
message = "Unsupported sort sequence"
class Diagnostic83 (SortDiagnostic):
code = 83
message = "Too many records to sort"
class Diagnostic84 (SortDiagnostic):
code = 84
message = "Too many sort keys"
class Diagnostic86 (SortDiagnostic):
code = 86
message = "Incompatible record formats"
class Diagnostic87 (SortDiagnostic):
code = 87
message = "Unsupported schema for sort"
class Diagnostic88 (SortDiagnostic):
code = 88
message = "Unsupported tag path for sort"
class Diagnostic89 (SortDiagnostic):
code = 89
message = "Tag path unsupported for schema"
class Diagnostic90 (SortDiagnostic):
code = 90
message = "Unsupported direction value"
class Diagnostic91 (SortDiagnostic):
code = 91
message = "Unsupported case value"
class Diagnostic92 (SortDiagnostic):
code = 92
message = "Unsupported missing value action"
class Diagnostic110 (StyleDiagnostic):
code = 110
message = "Stylesheets not supported"
class Diagnostic111 (StyleDiagnostic):
code = 111
message = "Unsupported stylesheet"
class Diagnostic120 (ScanDiagnostic):
code = 120
message = "Response position out of range"
class Diagnostic121 (ScanDiagnostic):
code = 121
message = "Too many terms requested"
# Deprecated diagnostics
class Diagnostic11 (DeprecatedDiagnostic):
code = 11
message = "Unsupported query type"
class Diagnostic12 (DeprecatedDiagnostic):
code = 12
message = "Too many characters in query"
class Diagnostic17 (DeprecatedDiagnostic):
code = 17
message = "Illegal or unsupported combination of index and index set."
class Diagnostic25 (DeprecatedDiagnostic):
code = 25
message = "Special characters not quoted in term"
class Diagnostic45 (DeprecatedDiagnostic):
code = 45
message = "Index set name (prefix) assigned to multiple identifiers"
class Diagnostic54 (DeprecatedDiagnostic):
code = 54
message = "Retrieval may only occur from an existing result set"
class Diagnostic56 (DeprecatedDiagnostic):
code = 56
message = "Only combination of single result set with search terms supported"
class Diagnostic57 (DeprecatedDiagnostic):
code = 57
message = "Result set created but no records available"
class Diagnostic62 (DeprecatedDiagnostic):
code = 62
message = "Negative number of records requested"
class Diagnostic63 (DeprecatedDiagnostic):
code = 63
message = "System error in retrieving records"
class Diagnostic81 (DeprecatedDiagnostic):
code = 81
message = "Unsupported sort type"
class Diagnostic85 (DeprecatedDiagnostic):
code = 85
message = "Duplicate sort keys"
class Diagnostic100 (ExplainDiagnostic):
code = 100
message = "Explain not supported"
class Diagnostic101 (ExplainDiagnostic):
code = 101
message = "Explain request type not supported"
class Diagnostic102 (ExplainDiagnostic):
code = 102
message = "Explain record temporarily unavailable"

View File

@ -1,5 +0,0 @@
"""Python Z3950/MARC/ASN.1 package, supporting ZOOM API.
"""
__all__ = ['zoom', 'zmarc']
# only pieces most users need: if you need asn1, import it explicitly

File diff suppressed because it is too large Load Diff

View File

@ -1,191 +0,0 @@
"""Translate bib-1 error numbers to messages."""
from PyZ3950 import asn1
from PyZ3950 import z3950
from PyZ3950 import oids
msg_dict = {
1: 'permanent system error', # (unspecified),
2: 'temporary system error', # (unspecified),
3: 'unsupported search', # (unspecified),
4: 'Terms only exclusion (stop) words', # (unspecified),
5: 'Too many argument words', # (unspecified),
6: 'Too many boolean operators', # (unspecified),
7: 'Too many truncated words', # (unspecified),
8: 'Too many incomplete subfields', # (unspecified),
9: 'Truncated words too short', # (unspecified),
10: 'Invalid format for record number (search term)', # (unspecified),
11: 'Too many characters in search statement', # (unspecified),
12: 'Too many records retrieved', # (unspecified),
13: 'Present request out-of-range', # (unspecified),
14: 'System error in presenting records', # (unspecified),
15: 'Record not authorized to be sent intersystem', # (unspecified),
16: 'Record exceeds Preferred-message-size', # (unspecified),
17: 'Record exceeds Exceptional-record-size', # (unspecified),
18: 'Result set not supported as a search term', # (unspecified),
19: 'Only single result set as search term supported', # (unspecified),
20: 'Only ANDing of a single result set as search term', # (unspecified),
21: 'Result set exists and replace indicator off', # (unspecified),
22: 'Result set naming not supported', # (unspecified),
23: 'Specified combination of databases not supported', # (unspecified),
24: 'Element set names not supported', # (unspecified),
25: 'Specified element set name not valid for specified database', # (unspecified),
26: 'Only generic form of element set name supported', # (unspecified),
27: 'Result set no longer exists - unilaterally deleted by target', # (unspecified),
28: 'Result set is in use', # (unspecified),
29: 'One of the specified databases is locked', # (unspecified),
30: 'Specified result set does not exist', # (unspecified),
31: 'Resources exhausted - no results available', # (unspecified),
32: 'Resources exhausted - unpredictable partial results available', # (unspecified),
33: 'Resources exhausted - valid subset of results available', # (unspecified),
100: '(unspecified) error', # (unspecified),
101: 'Access-control failure', # (unspecified),
102: 'Challenge required, could not be issued - operation terminated', # (unspecified),
103: 'Challenge required, could not be issued - record not included', # (unspecified),
104: 'Challenge failed - record not included', # (unspecified),
105: 'Terminated at origin request', # (unspecified),
106: 'No abstract syntaxes agreed to for this record', # (unspecified),
107: 'Query type not supported', # (unspecified),
108: 'Malformed query', # (unspecified),
109: 'Database unavailable', # database name,
110: 'Operator unsupported', # operator,
111: 'Too many databases specified', # maximum,
112: 'Too many result sets created', # maximum,
113: 'Unsupported attribute type', # type,
114: 'Unsupported Use attribute', # value,
115: 'Unsupported term value for Use attribute', # term,
116: 'Use attribute required but not supplied', # (unspecified),
117: 'Unsupported Relation attribute', # value,
118: 'Unsupported Structure attribute', # value,
119: 'Unsupported Position attribute', # value,
120: 'Unsupported Truncation attribute', # value,
121: 'Unsupported Attribute Set', # oid,
122: 'Unsupported Completeness attribute', # value,
123: 'Unsupported attribute combination', # (unspecified),
124: 'Unsupported coded value for term', # value,
125: 'Malformed search term', # (unspecified),
126: 'Illegal term value for attribute', # term,
127: 'Unparsable format for un-normalized value', # value,
128: 'Illegal result set name', # name,
129: 'Proximity search of sets not supported', # (unspecified),
130: 'Illegal result set in proximity search', # result set name,
131: 'Unsupported proximity relation', # value,
132: 'Unsupported proximity unit code', # value,
201: 'Proximity not supported with this attribute combination attribute', # list,
202: 'Unsupported distance for proximity', # distance,
203: 'Ordered flag not supported for proximity', # (unspecified),
205: 'Only zero step size supported for Scan', # (unspecified),
206: 'Specified step size not supported for Scan step', # size,
207: 'Cannot sort according to sequence', # sequence,
208: 'No result set name supplied on Sort', # (unspecified),
209: 'Generic sort not supported (database-specific sort only supported)', # (unspecified),
210: 'Database specific sort not supported', # (unspecified),
211: 'Too many sort keys', # number,
212: 'Duplicate sort keys', # key,
213: 'Unsupported missing data action', # value,
214: 'Illegal sort relation', # relation,
215: 'Illegal case value', # value,
216: 'Illegal missing data action', # value,
217: 'Segmentation: Cannot guarantee records will fit in specified segments', # (unspecified),
218: 'ES: Package name already in use', # name,
219: 'ES: no such package, on modify/delete', # name,
220: 'ES: quota exceeded', # (unspecified),
221: 'ES: extended service type not supported', # type,
222: 'ES: permission denied on ES - id not authorized', # (unspecified),
223: 'ES: permission denied on ES - cannot modify or delete', # (unspecified),
224: 'ES: immediate execution failed', # (unspecified),
225: 'ES: immediate execution not supported for this service', # (unspecified),
226: 'ES: immediate execution not supported for these parameters', # (unspecified),
227: 'No data available in requested record syntax', # (unspecified),
228: 'Scan: malformed scan', # (unspecified),
229: 'Term type not supported', # type,
230: 'Sort: too many input results', # max,
231: 'Sort: incompatible record formats', # (unspecified),
232: 'Scan: term list not supported', # alternative term list,
233: 'Scan: unsupported value of position-in-response', # value,
234: 'Too many index terms processed', # number of terms,
235: 'Database does not exist', # database name,
236: 'Access to specified database denied', # database name,
237: 'Sort: illegal sort', # (unspecified),
238: 'Record not available in requested syntax', # alternative suggested syntax(es),
239: 'Record syntax not supported', # syntax,
240: 'Scan: Resources exhausted looking for satisfying terms', # (unspecified),
241: 'Scan: Beginning or end of term list', # (unspecified),
242: 'Segmentation: max-segment-size too small to segment record', # smallest acceptable size,
243: 'Present: additional-ranges parameter not supported', # (unspecified),
244: 'Present: comp-spec parameter not supported', # (unspecified),
245: "Type-1 query: restriction ('resultAttr') operand not supported:", # (unspecified),
246: "Type-1 query: 'complex' attributeValue not supported", # (unspecified),
247: "Type-1 query: 'attributeSet' as part of AttributeElement not supported", # (unspecified),
1001: 'Malformed APDU',
1002: 'ES: EXTERNAL form of Item Order request not supported.', # ,
1003: 'ES: Result set item form of Item Order request not supported.', # ,
1004: 'ES: Extended services not supported unless access control is in effect.', # ,
1005: 'Response records in Search response not supported.', # ,
1006: 'Response records in Search response not possible for specified database (or database combination). See note 1.', # ,
1007: 'No Explain server. See note 2.', # pointers to servers that have a surrogate Explain database for this server.,
1008: 'ES: missing mandatory parameter for specified function', # parameter,
1009: 'ES: Item Order, unsupported OID in itemRequest.', # OID,
1010: 'Init/AC: Bad Userid', # ,
1011: 'Init/AC: Bad Userid and/or Password', # ,
1012: 'Init/AC: No searches remaining (pre-purchased searches exhausted)', # ,
1013: 'Init/AC: Incorrect interface type (specified id valid only when used with a particular access method or client)', # ,
1014: 'Init/AC: Authentication System error', # ,
1015: 'Init/AC: Maximum number of simultaneous sessions for Userid', # ,
1016: 'Init/AC: Blocked network address', # ,
1017: 'Init/AC: No databases available for specified userId', # ,
1018: 'Init/AC: System temporarily out of resources', # ,
1019: 'Init/AC: System not available due to maintenance', # when it's expected back up,
1020: 'Init/AC: System temporarily unavailable', # when it's expected back up,
1021: 'Init/AC: Account has expired', # ,
1022: 'Init/AC: Password has expired so a new one must be supplied', # ,
1023: 'Init/AC: Password has been changed by an administrator so a new one must be supplied', # ,
1024: 'Unsupported Attribute. See note 3.', # an unstructured string indicating the object identifier of the attribute set id, the numeric value of the attribute type, and the numeric value of the attribute.,
1025: 'Service not supported for this database', # ,
1026: 'Record cannot be opened because it is locked', # ,
1027: 'SQL error', # ,
1028: 'Record deleted', # ,
1029: 'Scan: too many terms requested.', # Addinfo: max terms supported,
1040: 'ES: Invalid function', # function,
1041: 'ES: Error in retention time', # (unspecified),
1042: 'ES: Permissions data not understood', # permissions,
1043: 'ES: Invalid OID for task specific parameters', # oid,
1044: 'ES: Invalid action', # action,
1045: 'ES: Unknown schema', # schema,
1046: 'ES: Too many records in package', # maximum number allowed,
1047: 'ES: Invalid wait action', # wait action,
1048: 'ES: Cannot create task package -- exceeds maximum permissable size (see note 4)', # maximum task package size,
1049: 'ES: Cannot return task package -- exceeds maximum permissable size for ES response (see note 5)', # maximum task package size for ES response,
1050: 'ES: Extended services request too large (see note 6)', # maximum size of extended services request,
1051: 'Scan: Attribute set id required -- not supplied', # ,
1052: 'ES: Cannot process task package record -- exceeds maximum permissible record size for ES (see note 7)', # maximum record size for ES,
1053: 'ES: Cannot return task package record -- exceeds maximum permissible record size for ES response (see note 8)', # maximum record size for ES response,
1054: 'Init: Required negotiation record not included', # oid(s) of required negotiation record(s),
1055: 'Init: negotiation option required', # ,
1056: 'Attribute not supported for database', # attribute (oid, type, and value), and database name,
1057: 'ES: Unsupported value of task package parameter (See Note 9)', # parameter and value,
1058: 'Duplicate Detection: Cannot dedup on requested record portion', # ,
1059: 'Duplicate Detection: Requested detection criterion not supported', # detection criterion,
1060: 'Duplicate Detection: Requested level of match not supported', # ,
1061: 'Duplicate Detection: Requested regular expression not supported', # ,
1062: 'Duplicate Detection: Cannot do clustering', # ,
1063: 'Duplicate Detection: Retention criterion not supported', # retention criterion,
1064: 'Duplicate Detection: Requested number (or percentage) of entries for retention too large', # ,
1065: 'Duplicate Detection: Requested sort criterion not supported', # sort criterion,
1066: 'CompSpec: Unknown schema, or schema not supported.', # ,
1067: 'Encapsulation: Encapsulated sequence of PDUs not supported.', # specific unsupported sequence,
1068: 'Encapsulation: Base operation (and encapsulated PDUs) not executed based on pre-screening analysis.', # ,
1069: 'No syntaxes available for this request. See note 10.', # ,
1070: 'user not authorized to receive record(s) in requested syntax', # ,
1071: 'preferredRecordSyntax not supplied', # ,
1072: 'Query term includes characters that do not translate into the target character set.', # Characters that do not translate
}
def lookup_errmsg (condition, oid):
    """Return the human-readable BIB-1 message for *condition*.

    Falls back to a descriptive string when *oid* is not the BIB-1
    diagnostic set, or when the condition number is unknown.
    """
    # '!=' replaces the Py2-only '<>' operator (identical semantics).
    if oid != oids.Z3950_DIAG_BIB1_ov:
        return "Unknown oid: %s condition %d" % (str (oid), condition)
    # dict.get replaces the Py2-only has_key() check.
    return msg_dict.get(condition,
                        "Unknown BIB-1 error condition %d" % (condition,))

View File

@ -1,406 +0,0 @@
#!/usr/local/bin/python2.3
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from PyZ3950 import z3950, oids
from types import IntType, StringType, ListType
# We need "\"\"" to be one token
from PyZ3950.CQLParser import CQLshlex
from PyZ3950.CQLUtils import ZCQLConfig
from PyZ3950.zdefs import make_attr
zconfig = ZCQLConfig()
"""
http://cheshire.berkeley.edu/cheshire2.html#zfind
top ::= query ['resultsetid' name]
query ::= query boolean clause | clause
clause ::= '(' query ')'
| attributes [relation] term
| resultset
attributes ::= '[' { [set] type '=' value } ']' | name
boolean ::= 'and' | 'or' | 'not' | (synonyms)
prox ::= ('!PROX' | (synonyms)) {'/' name}
relation ::= '>' | '<' | ...
[bib1 1=5, bib1 3=6] > term and title @ fish
"""
booleans = {'AND' : 'and',
'.AND.' : 'and',
'&&' : 'and',
'OR' : 'or',
'.OR.' : 'or',
'||' : 'or',
'NOT' : 'and-not',
'.NOT.' : 'and-not',
'ANDNOT' : 'and-not',
'.ANDNOT.' : 'and-not',
'!!' : 'and-not'
}
relations = {'<' : 1,
'LT' : 1,
'.LT.' : 1,
'<=' : 2,
'LE' : 2,
'.LE.' : 2,
'=' : 3,
'>=' : 4,
'GE' : 4,
'.GE.' : 4,
'>' : 5,
'GT' : 5,
'.GT.' : 5,
'<>' : 6,
'!=' : 6,
'NE' : 6,
'.NE.' : 6,
'?' : 100,
'PHON' : 100,
'.PHON.' : 100,
'%' : 101,
'STEM' : 101,
'.STEM.' : 101,
'@' : 102,
'REL' : 102,
'.REL.' : 102,
'<=>' : 104,
'WITHIN' : 104,
'.WITHIN.' : 104}
geoRelations = {'>=<' : 7,
'.OVERLAPS.' : 7,
'>#<' : 8,
'.FULLY_ENCLOSED_WITHIN.' : 8,
'<#>' : 9,
'.ENCLOSES.' : 9,
'<>#' : 10,
'.OUTSIDE_OF.' : 10,
'+-+' : 11,
'.NEAR.' : 11,
'.#.' : 12,
'.MEMBERS_CONTAIN.' : 12,
'!.#.' : 13,
'.MEMBERS_NOT_CONTAIN.' : 13,
':<:' : 14,
'.BEFORE.' : 14,
':<=:' : 15,
'.BEFORE_OR_DURING.' : 15,
':=:' : 16,
'.DURING.' : 16,
':>=:' : 17,
'.DURING_OR_AFTER.' : 17,
':>:' : 18,
'.AFTER.' : 18}
proxBooleans = {'!PROX' : (2, 0, 2),
'!ADJ' : (2, 0, 2),
'!NEAR' : (20, 0, 2),
'!FAR' : (20, 0, 4),
'!OPROX' : (2, 1, 2),
'!OADJ' : (2, 1, 2),
'!ONEAR' : (20, 1, 2),
'!OFAR' : (20, 1, 4)}
proxUnits = {'C' : 1,
'CHAR' : 1,
'W' : 2,
'WORD' : 2,
'S' : 3,
'SENT' : 3,
'SENTENCE' : 3,
'P' : 4,
'PARA' : 4,
'PARAGRAPH' : 4,
'SECTION' : 5,
'CHAPTER' : 6,
'DOCUMENT' : 7,
'ELEMENT' : 8,
'SUBELEMENT' : 9,
'ELEMENTTYPE' : 10,
'BYTE' : 11}
privateBooleans = {'!FUZZY_AND' : 1,
'!FUZZY_OR' : 2,
'!FUZZY_NOT' : 3,
'!RESTRICT_FROM' : 4,
'!RESTRICT_TO' : 5,
'!MERGE_SUM' : 6,
'!MERGE_MEAN' : 7,
'!MERGE_NORM' : 8}
xzconfig = ZCQLConfig()
class C2Parser:
lexer = None
currentToken = None
nextToken = None
    def __init__(self, l):
        # Wrap lexer *l* and prime the one-token lookahead buffer
        # (first fetch leaves currentToken=None, nextToken=first token).
        self.lexer = l
        self.fetch_token()
def fetch_token(self):
tok = self.lexer.get_token()
self.currentToken = self.nextToken
self.nextToken = tok
def is_boolean(self, tok=None):
if (tok == None):
tok = self.currentToken
if (privateBooleans.has_key(tok.upper())):
return 1
elif (booleans.has_key(tok.upper())):
return 2
elif (proxBooleans.has_key(tok.upper())):
return 3
else:
return 0
    def top(self):
        """Parse a full query: returns (('type_1', RPNQuery), resultset).

        resultset is the name following a trailing 'resultsetid'
        keyword, or None when absent.
        """
        rpn = self.query()
        # Check for resultsetid
        if (self.currentToken.lower() == 'resultsetid'):
            self.fetch_token()
            resultset = self.currentToken
        else:
            resultset = None
        rpnq = z3950.RPNQuery()
        # Queries are always expressed against the BIB-1 attribute set.
        rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
        rpnq.rpn = rpn
        q = ('type_1', rpnq)
        return (q, resultset)
    def query(self):
        """query ::= clause (boolean clause)*

        Builds a left-associative tree of rpnRpnOp nodes; stops at end
        of input or the first non-boolean token (e.g. ')').
        """
        self.fetch_token()
        left = self.subquery()
        while 1:
            if not self.currentToken:
                break
            bool = self.is_boolean()
            if bool:
                bool = self.boolean()
                right = self.subquery()
                # Put left into triple, make triple new left
                op = z3950.RpnRpnOp()
                op.rpn1 = left
                op.rpn2 = right
                op.op = bool
                wrap = ('rpnRpnOp', op)
                left = wrap
            else:
                break
        return left
def subquery(self):
if self.currentToken == "(":
object = self.query()
if (self.currentToken <> ")"):
raise ValueError
else:
self.fetch_token()
else:
object = self.clause()
return object
def boolean(self):
tok = self.currentToken.upper()
self.fetch_token()
if (booleans.has_key(tok)):
return (booleans[tok], None)
elif (privateBooleans.has_key(tok)):
# Generate cutesie prox trick
type = privateBooleans[tok]
prox = z3950.ProximityOperator()
prox.proximityUnitCode = ('private', type)
prox.distance = 0
prox.ordered = 0
prox.relationType = 3
return ('op', ('prox', prox))
elif (proxBooleans.has_key(tok)):
# Generate prox
prox = z3950.ProximityOperator()
stuff = proxBooleans[tok]
prox.distance = stuff[0]
prox.ordered = stuff[1]
prox.relationType = stuff[2]
prox.proximityUnitCode = ('known', 2)
# Now look for /
while (self.currentToken == "/"):
self.fetch_token()
if (self.currentToken.isdigit()):
prox.distance = int(self.currentToken)
elif (proxUnits.has_key(self.currentToken.upper())):
prox.proximityUnitCode = ('known', proxUnits[self.currentToken.upper()])
else:
raise ValueError
self.fetch_token()
return ('op', ('prox', prox))
else:
# Argh!
raise ValueError
def clause(self):
    """Parse one search clause.

    Returns ('op', ('resultSet', name)) for a bare result-set reference,
    otherwise ('op', ('attrTerm', AttributesPlusTerm)) built from an
    explicit '[...]' attribute list or a named index, an optional
    relation token, and the search term.

    Fixes over the previous revision:
      * 'self.currenToken' typo (AttributeError) in the accumulation loop
      * str.find(' ') used as a boolean -- it returns -1 (truthy) when the
        substring is absent, so unquoted terms took the "already quoted" path
      * int(self.currentToken) where the comma-stripped 'tok' was intended
      * the accumulation loop never advanced the lexer (infinite loop) and
        could swallow a closing ')'
    """
    if (self.is_boolean(self.nextToken) or not self.nextToken or self.nextToken.lower() == 'resultsetid' or self.nextToken == ")"):
        # The next token ends the clause, so the current token must be a
        # bare result-set name.
        tok = self.currentToken
        self.fetch_token()
        return ('op', ('resultSet', tok))
    elif (self.currentToken == '['):
        # Explicit attribute list: [ {set} type=value, ... ]
        attrs = []
        oidHash = oids.oids['Z3950']['ATTRS']
        while (1):
            self.fetch_token()
            if (self.currentToken == ']'):
                break
            # Optional attribute-set prefix: known set name or dotted OID.
            if (oidHash.has_key(self.currentToken)):
                attrSet = oidHash[self.currentToken]['ov']
                self.fetch_token()
            elif (self.currentToken[:8] == '1.2.840.'):
                attrSet = asn1.OidVal(map(int, self.currentToken.split('.')))
                self.fetch_token()
            else:
                attrSet = None
            # Strip the comma that separates attribute entries.
            if (self.currentToken[-1] == ','):
                tok = self.currentToken[:-1]
            else:
                tok = self.currentToken
            if (tok.isdigit()):
                # "1 = foo" / "1 =foo": the type stands alone.
                atype = int(tok)
                self.fetch_token()
                if (self.currentToken == '='):
                    # bare '=': the value is the next token
                    self.fetch_token()
                if (self.currentToken[0] == '='):
                    # '=foo': strip the leading '='
                    tok = self.currentToken[1:]
                else:
                    tok = self.currentToken
                if (tok[-1] == ','):
                    tok = tok[:-1]
                if (tok.isdigit()):
                    val = int(tok)
                else:
                    val = tok
                    if (val[0] == "'" and val[-1] == "'"):
                        val = val[1:-1]
            elif (tok[-1] == '='):
                # "1= foo"
                tok = tok[:-1]
                if (tok.isdigit()):
                    atype = int(tok)
                self.fetch_token()
                if (self.currentToken[-1] == ","):
                    tok = self.currentToken[:-1]
                else:
                    tok = self.currentToken
                if (tok.isdigit()):
                    # BUGFIX: was int(self.currentToken), which raised
                    # ValueError whenever the token had a trailing comma.
                    val = int(tok)
                else:
                    val = tok
                    if (val[0] == "'" and val[-1] == "'"):
                        val = val[1:-1]
            elif (tok.find('=') > -1):
                # "1=foo" in a single token ('tok' already has any trailing
                # comma stripped, so no extra comma handling is needed).
                (atype, val) = tok.split('=')
                atype = int(atype)
                if (val.isdigit()):
                    val = int(val)
                elif (val and val[0] == "'" and val[-1] == "'"):
                    val = val[1:-1]
            else:
                # Unrecognised attribute syntax.
                raise ValueError
            attrs.append([attrSet, atype, val])
    else:
        # Named index: map through zconfig.BIB1 when known, otherwise pass
        # the name through verbatim as a use (type 1) attribute.
        if (zconfig.BIB1.has_key(self.currentToken.lower())):
            attrs = [[oids.Z3950_ATTRS_BIB1_ov, 1, zconfig.BIB1[self.currentToken.lower()]]]
        else:
            attrs = [[oids.Z3950_ATTRS_BIB1_ov, 1, self.currentToken]]
    # Advance past the ']' (attribute-list case) or the index name.
    self.fetch_token()
    # Optional relation token: fold it into an existing type-2 attribute
    # for the matching set, or append a fresh one.
    tok = self.currentToken.upper()
    if (relations.has_key(tok)):
        val = relations[tok]
        found = 0
        for a in attrs:
            if (a[0] in [oids.Z3950_ATTRS_BIB1, None] and a[1] == 2):
                found = 1
                a[2] = val
                break
        if (not found):
            attrs.append([None, 2, val])
        self.fetch_token()
    elif (geoRelations.has_key(tok)):
        val = geoRelations[tok]
        found = 0
        for a in attrs:
            if (a[0] in [oids.Z3950_ATTRS_BIB1, oids.Z3950_ATTRS_GEO, None] and a[1] == 2):
                found = 1
                a[2] = val
                break
        if (not found):
            attrs.append([oids.Z3950_ATTRS_GEO, 2, val])
        self.fetch_token()
    # The search term: a single pre-quoted token containing a space, or an
    # accumulation of tokens up to the next clause boundary.
    if (self.currentToken.find(' ') > -1):  # BUGFIX: was a truthy test on find()
        # Already quoted
        term = self.currentToken
        self.fetch_token()
    else:
        # Accumulate
        words = []
        while (self.currentToken and not self.is_boolean(self.currentToken) and self.currentToken.lower() != 'resultsetid' and self.currentToken != ')'):
            words.append(self.currentToken)  # BUGFIX: was self.currenToken
            self.fetch_token()               # BUGFIX: loop never advanced before
        term = ' '.join(words)
    # Phew. Now build AttributesPlusTerm
    apt = z3950.AttributesPlusTerm()
    apt.attributes = [make_attr(*e) for e in attrs]
    apt.term = ('general', term)
    return ('op', ('attrTerm', apt))
def parse(q):
    """Compile CQL string *q* into (('type_1', RPNQuery), resultSetId)."""
    lexer = CQLshlex(StringIO(q))
    # Widen the token alphabet so relations, OIDs and paths lex as single
    # words, while '[' and ']' still delimit explicit attribute lists.
    lexer.wordchars += "!@#$%^&*-+;,.?|~`:\\><='"
    lexer.wordchars = lexer.wordchars.replace('[', '').replace(']', '')
    return C2Parser(lexer).top()

View File

@ -1,365 +0,0 @@
#!/usr/bin/env python
"""Implements part of CCL, the Common Command Language, ISO 8777. I'm
working from the description in the YAZ toolkit
(http://www.indexdata.dk/yaz/doc/tools.php), rather than the ISO
spec. Two extensions:
- qualifiers can be literal "(attrtyp, attrval)" pairs, so, e.g., the
following is a legitimate query for ISBN: "(1,7)=0312033095"
- the optional ATTRSET (attrset/query) which must appear at the beginning
of the string.
Allowed values are:
BIB1 (default)
XD1
UTIL
ZTHES1
EXP1
or an oid expressed as a dotted string. (A leading dot implies a
prefix of 1.2.840.10003.3, so, e.g., .1 is the same as BIB1.)
Eventually I will support v3-style mixing attribute sets within
a single query, but for now I don't.
"""
from __future__ import nested_scopes
import string

# Flipped to 1 when PyZ3950 itself cannot be imported (i.e. while this module
# is loaded during setup), so importers can detect the degraded state.
in_setup = 0

try:
    from PyZ3950 import z3950
    from PyZ3950 import oids
    from PyZ3950 import asn1
    # Lower-cased ATTRSET names -> attribute-set OIDs.
    _attrdict = {
        'bib1' : oids.Z3950_ATTRS_BIB1_ov,
        'zthes1': oids.Z3950_ATTRS_ZTHES_ov,
        'xd1': oids.Z3950_ATTRS_XD1_ov,
        'utility': oids.Z3950_ATTRS_UTIL_ov,
        'exp1': oids.Z3950_ATTRS_EXP1_ov
        }
except ImportError, err:
    print "Error importing (OK during setup)", err
    in_setup = 1

class QuerySyntaxError(Exception): pass  # root of this module's error hierarchy
class ParseError(QuerySyntaxError): pass  # grammar-level failure
class LexError(QuerySyntaxError): pass  # tokenizer failure
class UnimplError(QuerySyntaxError): pass  # valid CCL this module doesn't support yet
# PLY lexer definitions.  NB: PLY derives behaviour from the t_* names and
# from function DOCSTRINGS (they are the token regexes) -- the docstrings
# below are code, not documentation.
tokens = ('LPAREN', 'RPAREN', 'COMMA',
          'SET', 'ATTRSET','QUAL', 'QUOTEDVALUE', 'RELOP', 'WORD',
          'LOGOP', 'SLASH')

t_LPAREN= r'\('
t_RPAREN= r'\)'
t_COMMA = r','
t_SLASH = r'/'

def t_ATTRSET(t):
    r'(?i)ATTRSET'
    return t

def t_SET (t): # need to def as function to override parsing as WORD, gr XXX
    r'(SET)'
    return t

# Relation operator -> Bib-1 relation-attribute (type 2) value.
relop_to_attrib = {
    '<': 1,
    '<=': 2,
    '=': 3,
    '>=': 4,
    '>': 5,
    '<>': 6}

# Alternation of every relop above, used as the RELOP token pattern.
t_RELOP = "|".join (["(%s)" % r for r in relop_to_attrib.keys()])

# XXX Index Data docs say 'doesn't follow ... ISO8777'?
# XXX expand to rd. addt'l defns from file?
qual_dict = { # These are bib-1 attribute values, see
              # http://www.loc.gov/z3950/agency/defns/bib1.html and ftp://ftp.loc.gov/pub/z3950/defs/bib1.txt
    'TI': (1,4),
    'AU': (1,1003), # use 1003 to work w/ both NLC-BNC and LC
    'ISBN': (1,7),
    'LCCN': (1,9),
    'ANY': (1,1016),
    'FIF': (3, 1), # first-in-field
    'AIF': (3,3), # any-in-field (default)
    'RTRUNC': (5,1),
    'NOTRUNC': (5,100) # (default)
    }

default_quals = ['ANY'] # XXX should be per-attr-set
default_relop = '='

def t_QUAL(t):
    # No literal docstring: the pattern is installed dynamically by
    # mk_quals() via t_QUAL.__doc__, so it tracks qual_dict.
    return t

def mk_quals ():
    # Rebuild t_QUAL's regex from the current qual_dict keys, plus the
    # literal "(type,value)" pair form.
    quals = ("|".join (map (lambda x: '(' + x + ')', qual_dict.keys())))
    t_QUAL.__doc__ = "(?i)" + quals + r"|(\([0-9]+,[0-9]+\))"

def t_QUOTEDVALUE(t):
    r"(\".*?\")"
    # Strip the surrounding double quotes from the token value.
    if t.value[0] == '"':
        t.value = t.value[1:-1]
    return t

# Character classes used to build the unquoted WORD token pattern.
word_init = "[a-z]|[A-Z]|[0-9]|&|:"
word_non_init = ",|\.|\'"
t_WORD = "(%s)(%s|%s)*" % (word_init, word_init, word_non_init)

def t_LOGOP(t):
    r'(?i)(AND)|(OR)|(NOT)'
    return t

t_ignore = " \t"

def t_error(t):
    raise LexError ('t_error: ' + str (t))

from ply import lex

def relex ():
    # (Re)build the module-level lexer; called at import time and whenever
    # the qualifier table changes.
    global lexer
    mk_quals ()
    lexer = lex.lex()
relex ()

def add_qual (qual_name, val):
    """Add a qualifier definition, and regenerate the lexer."""
    qual_dict[qual_name] = val
    relex ()

from ply import yacc
#if in_setup:
#    import yacc
#else:
#    from PyZ3950 import yacc
class Node:
    """AST node for the CCL parser: a type tag, child nodes, and an
    optional leaf payload.  Renders itself as an indented tree."""

    def __init__(self, type, children=None, leaf=None):
        self.type = type
        self.children = children or []
        self.leaf = leaf

    def str_child(self, child, depth):
        # Nested nodes render themselves; bare leaves get indented str().
        if isinstance(child, Node):
            return child.str_depth(depth)
        return " " * (4 * depth) + str(child) + "\n"

    def str_depth(self, depth):
        pad = " " * (4 * depth)
        parts = ["%s%s %s" % (pad, self.type, self.leaf)]
        parts.append("".join([self.str_child(c, depth + 1) for c in self.children]))
        return "\n".join(parts)

    def __str__(self):
        return "\n" + self.str_depth(0)
# --- Grammar rules.  Each p_* function's DOCSTRING is its BNF production
# --- (consumed by PLY), so the docstrings below are code, not docs.

def p_top (t):
    'top : cclfind_or_attrset'
    t[0] = t[1]

def p_cclfind_or_attrset_1 (t):
    'cclfind_or_attrset : cclfind'
    t[0] = t[1]

def p_cclfind_or_attrset_2 (t):
    'cclfind_or_attrset : ATTRSET LPAREN WORD SLASH cclfind RPAREN'
    # The leaf carries the attribute-set name; the child is the inner query.
    t[0] = Node ('attrset', [t[5]], t[3])

def p_ccl_find_1(t):
    'cclfind : cclfind LOGOP elements'
    # Binary boolean node: leaf is the operator text.
    t[0] = Node ('op', [t[1],t[3]], t[2])

def p_ccl_find_2(t):
    'cclfind : elements'
    t[0] = t[1]

def p_elements_1(t):
    'elements : LPAREN cclfind RPAREN'
    t[0] = t[2]
class QuallistVal:
    """Pairs a list of (attrtype, attrval) qualifiers with a search value,
    exposing tuple-like access: [0] -> qualifiers, [1] -> value."""

    def __init__(self, quallist, val):
        self.quallist = quallist
        self.val = val

    def __str__(self):
        return "QV: %s %s" % (str(self.quallist), str(self.val))

    def __getitem__(self, i):
        if i == 0:
            return self.quallist
        if i == 1:
            return self.val
        raise IndexError('QuallistVal err ' + str(i))
def xlate_qualifier (x):
    """Translate a qualifier token into an (attrtype, attrval) pair.

    *x* is either a literal "(type,value)" pair or a symbolic name looked
    up case-insensitively in qual_dict (raises KeyError when unknown).
    """
    if x[0] == '(' and x[-1] == ')':
        t = x[1:-1].split (',') # t must be of len 2 b/c of lexer
        # int() replaces string.atoi(), which was removed in Python 3;
        # both parse a base-10 numeral string identically.
        return (int (t[0]), int (t[1]))
    return qual_dict[(x.upper ())]
def p_elements_2 (t):
    'elements : SET RELOP WORD'
    # Only '=' makes sense for a result-set reference.
    # NOTE(review): str() is called with three arguments below, which looks
    # wrong (should probably be a tuple of three str() calls) -- preserved.
    if t[2] <> '=':
        raise QuerySyntaxError (str (t[1], str (t[2]), str (t[3])))
    t[0] = Node ('set', leaf = t[3])

def p_elements_3(t):
    'elements : val'
    # Bare value: search with the default qualifiers and relation.
    t[0] = Node ('relop', QuallistVal (map (xlate_qualifier, default_quals), t[1]), default_relop)

def p_elements_4(t):
    'elements : quallist RELOP val'
    t[0] = Node ('relop', QuallistVal(map (xlate_qualifier, t[1]),t[3]), t[2])

# XXX p_elements_5 would be quals followed by recursive def'n, not yet implemented
# XXX p_elements_6 would be quals followed by range, not yet implemented.

def p_quallist_1 (t):
    'quallist : QUAL'
    t[0] = [t[1]]

def p_quallist_2 (t):
    'quallist : quallist COMMA QUAL'
    t[0] = t[1] + [t[3]]

def p_val_1(t):
    'val : QUOTEDVALUE'
    t[0] = t[1]

def p_val_2(t):
    'val : val WORD'
    # Unquoted multi-word values are rejoined with single spaces.
    t[0] = t[1] + " " + t[2]

def p_val_3(t):
    'val : WORD'
    t[0] = t[1]

# XXX also don't yet handle proximity operator

def p_error(t):
    raise ParseError ('Parse p_error ' + str (t))

precedence = (
    ('left', 'LOGOP'),
    )

# Build the parser tables once, at import time.
yacc.yacc (debug=0, tabmodule = 'PyZ3950_parsetab')
#yacc.yacc (debug=0, tabpackage = 'PyZ3950', tabmodule='PyZ3950_parsetab')
def attrset_to_oid (attrset):
    """Map an ATTRSET name or dotted-OID string to an asn1.OidVal.

    Raises ParseError when a dotted component is not numeric.
    """
    l = attrset.lower ()
    if _attrdict.has_key (l):
        return _attrdict [l]
    split_l = l.split ('.')
    if split_l[0] == '':
        # Leading dot: prepend the Z39.50 attribute-set OID prefix.
        # NOTE(review): oids.Z3950_ATTRS appears to be a list of ints while
        # split_l holds strings, so string.atoi below looks like it would
        # choke on the int elements -- confirm against oids.py.
        split_l = oids.Z3950_ATTRS + split_l[1:]
    try:
        intlist = map (string.atoi, split_l)
    except ValueError:
        raise ParseError ('Bad OID: ' + l)
    return asn1.OidVal (intlist)
def tree_to_q (ast):
    """Recursively translate a parsed CCL AST node into Z39.50 RPN structures."""
    if ast.type == 'op':
        # Boolean node: translate both sides; CCL 'not' maps to Z39.50 'and-not'.
        myrpnRpnOp = z3950.RpnRpnOp ()
        myrpnRpnOp.rpn1 = tree_to_q(ast.children[0])
        myrpnRpnOp.rpn2 = tree_to_q(ast.children[1])
        op = ast.leaf.lower ()
        if op == 'not': op = 'and-not' # CCL spec of 'not' vs. Z39.50 spec of 'and-not'
        myrpnRpnOp.op = (op, None)
        return ('rpnRpnOp', myrpnRpnOp)
    elif ast.type == 'relop':
        # XXX but e.g. LC (http://lcweb.loc.gov/z3950/lcserver.html)
        # doesn't support other relation attributes, either.
        try:
            relattr = relop_to_attrib [ast.leaf]
        except KeyError: # should never happen, how could we have lexed it?
            raise UnimplError (ast.leaf)
        def make_aelt (qual):
            # (attrtype, attrval) pair -> z3950.AttributeElement
            val = ('numeric', qual [1])
            return z3950.AttributeElement (attributeType = qual[0],
                                           attributeValue = val)
        apt = z3950.AttributesPlusTerm ()
        quallist = ast.children.quallist
        if ast.leaf <> '=':
            # Non-'=' relation: add an explicit relation attribute.
            quallist.append ((2,relattr)) # 2 is relation attribute
            # see http://www.loc.gov/z3950/agency/markup/13.html ATR.1.1
        apt.attributes = map (make_aelt, quallist)
        apt.term = ('general', ast.children.val) # XXX update for V3?
        return ('op', ('attrTerm', apt))
    elif ast.type == 'set':
        return ('op', ('resultSet', ast.leaf))
    raise UnimplError("Bad ast type " + str(ast.type))
def mk_rpn_query (query):
    """Transform a CCL query into an RPN query."""
    # need to copy or create a new lexer because it contains globals
    # PLY 1.0 lacks __copy__
    # PLY 1.3.1-1.5 have __copy__, but it's broken and returns None
    # I sent David Beazley a patch, so future PLY releases will
    # presumably work correctly.
    # Recreating the lexer each time is noticeably slower, so this solution
    # is suboptimal for PLY <= 1.5, but better than being thread-unsafe.
    # Perhaps I should have per-thread lexer instead XXX
    # with example/twisted/test.py set to parse_only, I get 277 parses/sec
    # with fixed PLY, vs. 63 parses/sec with broken PLY, on my 500 MHz PIII
    # laptop.
    copiedlexer = None
    if hasattr (lexer, '__copy__'):
        copiedlexer = lexer.__copy__ ()
    if copiedlexer == None:
        copiedlexer = lex.lex ()
    ast = yacc.parse (query, copiedlexer)
    return ast_to_rpn (ast)

def ast_to_rpn (ast):
    """Convert a parsed AST into a ('type_1', z3950.RPNQuery) pair."""
    if ast.type == 'attrset':
        # Explicit ATTRSET wrapper: use its attribute set and unwrap.
        attrset = attrset_to_oid (ast.leaf)
        ast = ast.children [0]
    else:
        attrset = oids.Z3950_ATTRS_BIB1_ov
    rpnq = z3950.RPNQuery (attributeSet = attrset)
    rpnq.rpn = tree_to_q (ast)
    return ('type_1', rpnq)
def testlex (s):
    """Debug helper: print the token stream produced for *s*."""
    lexer.input (s)
    while 1:
        token = lexer.token ()
        if not token:
            break
        print token

def testyacc (s):
    """Debug helper: parse *s* and print both the AST and the RPN query."""
    copylex = lexer.__copy__ ()
    ast = yacc.parse (s, lexer = copylex)
    print "AST:", ast
    print "RPN Query:", ast_to_rpn (ast)

if __name__ == '__main__':
    # Interactive smoke test: one canned query, then a read-eval loop that
    # ends on an empty line.
    testfn = testyacc
    # testfn = testlex
    testfn ('attrset (BIB1/ au="Gaiman, Neil" or ti=Sandman)')
    while 1:
        s = raw_input ('Query: ')
        if len (s) == 0:
            break
        testfn (s)
    # testyacc ()
    # testlex ()

View File

@ -1,53 +0,0 @@
#!/usr/bin/env python
# Dead module: the assert below prevents any accidental import/use.
assert (0)
# XXX shouldn't use, absorbed into z3950_2001.py
#from PyZ3950 import asn1
import asn1

# ASN.1 structure definitions for Z39.50 character-set and language
# negotiation, expressed with this package's asn1 combinators.
InitialSet=asn1.SEQUENCE ([('g0',None,asn1.TYPE(asn1.IMPLICIT(0,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1),
    ('g1',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1),
    ('g2',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1),
    ('g3',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1),
    ('c0',None,asn1.TYPE(asn1.IMPLICIT(4,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),0),
    ('c1',None,asn1.TYPE(asn1.IMPLICIT(5,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1)])

PrivateCharacterSet=asn1.CHOICE ([('viaOid',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (asn1.OBJECT_IDENTIFIER))),
    ('externallySpecified',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),asn1.EXTERNAL)),
    ('previouslyAgreedUpon',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.NULL))])

LeftAndRight=asn1.SEQUENCE ([('gLeft',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),0),
    ('gRight',None,asn1.TYPE(asn1.IMPLICIT(4,cls=asn1.CONTEXT_FLAG),asn1.INTEGER),1)])

Iso10646=asn1.SEQUENCE ([('collections',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG),asn1.OBJECT_IDENTIFIER),1),
    ('encodingLevel',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),asn1.OID),0)])

LanguageCode=asn1.GeneralString

Environment=asn1.CHOICE ([('sevenBit',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG),asn1.NULL)),
    ('eightBit',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),asn1.NULL))])

# Two-sided ISO 2022 negotiation: origin proposes, target responds.
Iso2022=asn1.CHOICE ([('originProposal',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE ([('proposedEnvironment',None,asn1.TYPE(asn1.EXPLICIT(0,cls=asn1.CONTEXT_FLAG),Environment),1),
    ('proposedSets',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (asn1.INTEGER)),0),
    ('proposedInitialSets',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (InitialSet)),0),
    ('proposedLeftAndRight',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),LeftAndRight),0)]))),
    ('targetResponse',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE ([('selectedEnvironment',None,asn1.TYPE(asn1.EXPLICIT(0,cls=asn1.CONTEXT_FLAG),Environment),0),
    ('selectedSets',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (asn1.INTEGER)),0),
    ('selectedinitialSet',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),InitialSet),0),
    ('selectedLeftAndRight',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),LeftAndRight),0)])))])

TargetResponse=asn1.SEQUENCE ([('selectedCharSets',None,asn1.TYPE(asn1.EXPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.CHOICE ([('iso2022',None,asn1.TYPE(asn1.EXPLICIT(1,cls=asn1.CONTEXT_FLAG),Iso2022)),
    ('iso10646',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),Iso10646)),
    ('private',None,asn1.TYPE(asn1.EXPLICIT(3,cls=asn1.CONTEXT_FLAG),PrivateCharacterSet)),
    ('none',None,asn1.TYPE(asn1.IMPLICIT(4,cls=asn1.CONTEXT_FLAG),asn1.NULL))])),1),
    ('selectedLanguage',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),LanguageCode),1),
    ('recordsInSelectedCharSets',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.BOOLEAN),1)])

OriginProposal=asn1.SEQUENCE ([('proposedCharSets',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF ( asn1.CHOICE ([('iso2022',None,asn1.TYPE(asn1.EXPLICIT(1,cls=asn1.CONTEXT_FLAG),Iso2022)),
    ('iso10646',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),Iso10646)),
    ('private',None,asn1.TYPE(asn1.EXPLICIT(3,cls=asn1.CONTEXT_FLAG),PrivateCharacterSet))]))),1),
    ('proposedlanguages',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG), asn1.SEQUENCE_OF (LanguageCode)),1),
    ('recordsInSelectedCharSets',None,asn1.TYPE(asn1.IMPLICIT(3,cls=asn1.CONTEXT_FLAG),asn1.BOOLEAN),1)])

# Top-level negotiation record carried in the Z39.50 otherInfo.
CharSetandLanguageNegotiation=asn1.CHOICE ([('proposal',None,asn1.TYPE(asn1.IMPLICIT(1,cls=asn1.CONTEXT_FLAG),OriginProposal)),
    ('response',None,asn1.TYPE(asn1.IMPLICIT(2,cls=asn1.CONTEXT_FLAG),TargetResponse))])

View File

@ -1,65 +0,0 @@
#!/usr/bin/env python
# Original by Robert Sanderson, modifications by Aaron Lav
import sys
from PyZ3950 import asn1
# Reads oids.txt (one entry per line: name, OID number, optional aliases)
# and generates oids.py with a nested 'oids' dict plus flat NAME / NAME_ov
# module-level variables.
inh = file("oids.txt")
outh = file("oids.py", "w")
outh.write('from PyZ3950 import asn1\n')
# from ... to get same globals as others importing asn1
outh.write('oids = {}\n')

oids = {}
vars = {}

for line in inh:
    if (not line.isspace()):
        flds = line.split(None)
        name = flds[0]
        number = flds[1]
        if (len(flds) > 2):
            # NOTE(review): aliasList is collected here but never used below.
            aliasList = flds[2:]
        else:
            aliasList = []
        if (number[0] == "."):
            # add to previous: a leading dot extends the OID found by
            # walking the underscore-separated components of 'name'.
            splitname = name.split("_")
            cur = oids
            for n in splitname[:-1]:
                cur = cur[n]
            val = cur['val'] + [int(number[1:])]
            oid = asn1.OidVal(val)
            cur [splitname[-1]] = {'oid': oid, 'val' : val}
            vars[name] = val
            tree = "oids['%s']" % "']['".join (splitname)
            outh.write(tree + " = " + "{'oid': asn1.OidVal(" + str(val) + "), 'val': " + str(val) + "}\n")
        else:
            # base entry: a full dotted OID
            splitnums = number.split('.')
            numlist = map(int, splitnums)
            oids[name] = {}
            oids[name]['oid'] = asn1.OidVal(numlist)
            oids[name]['val'] = numlist
            vars[name] = numlist
            outh.write("oids['" + name + "'] = {'oid': asn1.OidVal(" + str(numlist) + "), 'val': " + str(numlist) + "}\n")
inh.close()

# Emit the flat NAME / NAME_ov aliases, sorted by name.
items = vars.items()
items.sort()
for k,v in items:
    outh.write(k + " = " + str(v) + "\n")
    outh.write(k + "_ov = asn1.OidVal(" + str (v) + ")\n")
outh.close()

View File

@ -1,71 +0,0 @@
#!/usr/bin/env python
"""Utility functions for GRS-1 data"""
from __future__ import nested_scopes
# XXX still need to tag non-leaf nodes w/ (tagType, tagValue)
# XXX tagType can be omitted. If so, default either supplied
# dynamically by tagSet-M or statically spec'd by schema
# from TAG (Z39.50-1995 App 12): tagType 1 is tagSet-M, 2 tagSet-G,
# 3 locally defined.
class Node:
    """Defined members are:
    tag - tag (always present, except for top node)
    metadata - metadata (opt, seriesOrder only for nonleaf - v. RET.3.2.3 )
    children - list of Node
    leaf - leaf data (children and leaf are mutually exclusive)
    """
    def __init__ (self, **kw):
        # Members come entirely from the keyword arguments (see class doc).
        self.__dict__.update (kw)
        self.tab_size = 3 # controls str() indentation width
    def str_depth (self, depth):
        # Render this node at the given indentation depth, then all children
        # one level deeper.  Missing members default via getattr.
        l = []
        children = getattr (self, 'children', [])
        leaf = getattr (self, 'leaf', None)
        tag = getattr (self, 'tag', None)
        indent = " " * (self.tab_size * depth)
        if leaf <> None:
            # leaf is expected to carry a .content attribute
            # (set by the asn.1 decoder -- see preproc below).
            l.append ("%s%s %s" % (
                indent, str (tag), leaf.content))
        else:
            if tag <> None:
                l.append (indent + str (tag))
            meta = getattr (self, 'metadata', None)
            if meta <> None:
                l.append (indent + 'metadata: ' + str (meta))
        l.append ("".join (map (
            lambda n: n.str_depth (depth + 1), children)))
        return "\n".join (l)
    def __str__ (self):
        # Start at depth -1 so the (tagless) top node doesn't add a level.
        return "\n" + self.str_depth (-1)
def preproc (raw):
    """Transform the raw output of the asn.1 decoder into something
    a bit more programmer-friendly. (This is automatically called
    by the ZOOM API, so you don't need to worry about it unless you're
    using the raw z3950 API.)
    """
    if isinstance (raw, type ([])):
        # A bare list becomes an anonymous (untagged) subtree node.
        return Node (children = map (preproc, raw))
    else: # TaggedElement
        kw = {}
        tag = (raw.tagType, raw.tagValue [1])
        # Value [0] is str vs. num indicator
        kw ['tag'] = tag
        meta = getattr (raw, 'metaData', None)
        if meta <> None:
            kw ['metadata'] = meta
        if raw.content[0] == 'subtree':
            return Node (children = map (preproc, raw.content [1]), **kw)
        else:
            # tag and metadata are here redundantly encoded as
            # both attributes of leaf and of Node. Use the Node
            # attribs, I'll try to clean this up sometime.
            return Node (leaf = raw, **kw)

File diff suppressed because it is too large Load Diff

View File

@ -1,479 +0,0 @@
from PyZ3950 import asn1
# Registry of ASN.1 OBJECT IDENTIFIERs used by Z39.50 (plus the Unicode
# transfer syntaxes), keyed by symbolic name.  Each entry is a dict with:
#   'oid' - an asn1.OidVal wrapper (directly usable in protocol messages)
#   'val' - the same OID as a plain list of integer arcs
# Child OIDs are stored as extra keys nested inside their parent's dict,
# alongside 'oid'/'val' (e.g. oids['Z3950']['RECSYN']['USMARC']['AUTH']).
oids = {}
oids['Z3950'] = {'oid': asn1.OidVal([1, 2, 840, 10003]), 'val': [1, 2, 840, 10003]}
oids['Z3950']['ATTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3]), 'val': [1, 2, 840, 10003, 3]}
oids['Z3950']['DIAG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4]), 'val': [1, 2, 840, 10003, 4]}
oids['Z3950']['RECSYN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5]), 'val': [1, 2, 840, 10003, 5]}
oids['Z3950']['TRANSFER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 6]), 'val': [1, 2, 840, 10003, 6]}
oids['Z3950']['RRF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7]), 'val': [1, 2, 840, 10003, 7]}
oids['Z3950']['ACCESS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8]), 'val': [1, 2, 840, 10003, 8]}
oids['Z3950']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9]), 'val': [1, 2, 840, 10003, 9]}
oids['Z3950']['USR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10]), 'val': [1, 2, 840, 10003, 10]}
oids['Z3950']['SPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11]), 'val': [1, 2, 840, 10003, 11]}
oids['Z3950']['VAR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12]), 'val': [1, 2, 840, 10003, 12]}
oids['Z3950']['SCHEMA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13]), 'val': [1, 2, 840, 10003, 13]}
oids['Z3950']['TAGSET'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14]), 'val': [1, 2, 840, 10003, 14]}
oids['Z3950']['NEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15]), 'val': [1, 2, 840, 10003, 15]}
oids['Z3950']['QUERY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16]), 'val': [1, 2, 840, 10003, 16]}
# Attribute sets (arc 3).
oids['Z3950']['ATTRS']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 1]), 'val': [1, 2, 840, 10003, 3, 1]}
oids['Z3950']['ATTRS']['EXP1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 2]), 'val': [1, 2, 840, 10003, 3, 2]}
oids['Z3950']['ATTRS']['EXT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 3]), 'val': [1, 2, 840, 10003, 3, 3]}
oids['Z3950']['ATTRS']['CCL1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 4]), 'val': [1, 2, 840, 10003, 3, 4]}
oids['Z3950']['ATTRS']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 5]), 'val': [1, 2, 840, 10003, 3, 5]}
oids['Z3950']['ATTRS']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 6]), 'val': [1, 2, 840, 10003, 3, 6]}
oids['Z3950']['ATTRS']['COLLECTIONS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 7]), 'val': [1, 2, 840, 10003, 3, 7]}
oids['Z3950']['ATTRS']['CIMI1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 8]), 'val': [1, 2, 840, 10003, 3, 8]}
oids['Z3950']['ATTRS']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 9]), 'val': [1, 2, 840, 10003, 3, 9]}
oids['Z3950']['ATTRS']['ZBIG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 10]), 'val': [1, 2, 840, 10003, 3, 10]}
oids['Z3950']['ATTRS']['UTIL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 11]), 'val': [1, 2, 840, 10003, 3, 11]}
oids['Z3950']['ATTRS']['XD1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 12]), 'val': [1, 2, 840, 10003, 3, 12]}
oids['Z3950']['ATTRS']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 13]), 'val': [1, 2, 840, 10003, 3, 13]}
oids['Z3950']['ATTRS']['FIN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 14]), 'val': [1, 2, 840, 10003, 3, 14]}
oids['Z3950']['ATTRS']['DAN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 15]), 'val': [1, 2, 840, 10003, 3, 15]}
oids['Z3950']['ATTRS']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 16]), 'val': [1, 2, 840, 10003, 3, 16]}
oids['Z3950']['ATTRS']['MARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 17]), 'val': [1, 2, 840, 10003, 3, 17]}
oids['Z3950']['ATTRS']['BIB2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 18]), 'val': [1, 2, 840, 10003, 3, 18]}
oids['Z3950']['ATTRS']['ZEEREX'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 19]), 'val': [1, 2, 840, 10003, 3, 19]}
# Diagnostic sets (arc 4).
oids['Z3950']['DIAG']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 1]), 'val': [1, 2, 840, 10003, 4, 1]}
oids['Z3950']['DIAG']['DIAG1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 2]), 'val': [1, 2, 840, 10003, 4, 2]}
oids['Z3950']['DIAG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 3]), 'val': [1, 2, 840, 10003, 4, 3]}
oids['Z3950']['DIAG']['GENERAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 4]), 'val': [1, 2, 840, 10003, 4, 4]}
# Record syntaxes (arc 5).
oids['Z3950']['RECSYN']['UNIMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 1]), 'val': [1, 2, 840, 10003, 5, 1]}
oids['Z3950']['RECSYN']['INTERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 2]), 'val': [1, 2, 840, 10003, 5, 2]}
oids['Z3950']['RECSYN']['CCF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 3]), 'val': [1, 2, 840, 10003, 5, 3]}
oids['Z3950']['RECSYN']['USMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10]), 'val': [1, 2, 840, 10003, 5, 10]}
oids['Z3950']['RECSYN']['USMARC']['BIBLIO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 1]), 'val': [1, 2, 840, 10003, 5, 10, 1]}
oids['Z3950']['RECSYN']['USMARC']['AUTH'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 2]), 'val': [1, 2, 840, 10003, 5, 10, 2]}
oids['Z3950']['RECSYN']['USMARC']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 3]), 'val': [1, 2, 840, 10003, 5, 10, 3]}
oids['Z3950']['RECSYN']['USMARC']['COMMUNITY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 4]), 'val': [1, 2, 840, 10003, 5, 10, 4]}
oids['Z3950']['RECSYN']['USMARC']['CLASS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 5]), 'val': [1, 2, 840, 10003, 5, 10, 5]}
oids['Z3950']['RECSYN']['UKMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 11]), 'val': [1, 2, 840, 10003, 5, 11]}
oids['Z3950']['RECSYN']['NORMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 12]), 'val': [1, 2, 840, 10003, 5, 12]}
oids['Z3950']['RECSYN']['LIBRISMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 13]), 'val': [1, 2, 840, 10003, 5, 13]}
oids['Z3950']['RECSYN']['DANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 14]), 'val': [1, 2, 840, 10003, 5, 14]}
oids['Z3950']['RECSYN']['FINMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 15]), 'val': [1, 2, 840, 10003, 5, 15]}
oids['Z3950']['RECSYN']['MAB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 16]), 'val': [1, 2, 840, 10003, 5, 16]}
oids['Z3950']['RECSYN']['CANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 17]), 'val': [1, 2, 840, 10003, 5, 17]}
oids['Z3950']['RECSYN']['SBNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 18]), 'val': [1, 2, 840, 10003, 5, 18]}
oids['Z3950']['RECSYN']['PICAMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 19]), 'val': [1, 2, 840, 10003, 5, 19]}
oids['Z3950']['RECSYN']['AUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 20]), 'val': [1, 2, 840, 10003, 5, 20]}
oids['Z3950']['RECSYN']['IBERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 21]), 'val': [1, 2, 840, 10003, 5, 21]}
oids['Z3950']['RECSYN']['CATMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 22]), 'val': [1, 2, 840, 10003, 5, 22]}
oids['Z3950']['RECSYN']['MALMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 23]), 'val': [1, 2, 840, 10003, 5, 23]}
oids['Z3950']['RECSYN']['JPMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 24]), 'val': [1, 2, 840, 10003, 5, 24]}
oids['Z3950']['RECSYN']['SWEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 25]), 'val': [1, 2, 840, 10003, 5, 25]}
oids['Z3950']['RECSYN']['SIGLEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 26]), 'val': [1, 2, 840, 10003, 5, 26]}
oids['Z3950']['RECSYN']['ISDSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 27]), 'val': [1, 2, 840, 10003, 5, 27]}
oids['Z3950']['RECSYN']['RUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 28]), 'val': [1, 2, 840, 10003, 5, 28]}
oids['Z3950']['RECSYN']['HUNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 29]), 'val': [1, 2, 840, 10003, 5, 29]}
oids['Z3950']['RECSYN']['NACSISCATP'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 30]), 'val': [1, 2, 840, 10003, 5, 30]}
oids['Z3950']['RECSYN']['FINMARC2000'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 31]), 'val': [1, 2, 840, 10003, 5, 31]}
oids['Z3950']['RECSYN']['MARC21FIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 32]), 'val': [1, 2, 840, 10003, 5, 32]}
oids['Z3950']['RECSYN']['COMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 33]), 'val': [1, 2, 840, 10003, 5, 33]}
oids['Z3950']['RECSYN']['EXPLAIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 100]), 'val': [1, 2, 840, 10003, 5, 100]}
oids['Z3950']['RECSYN']['SUTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 101]), 'val': [1, 2, 840, 10003, 5, 101]}
oids['Z3950']['RECSYN']['OPAC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 102]), 'val': [1, 2, 840, 10003, 5, 102]}
oids['Z3950']['RECSYN']['SUMMARY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 103]), 'val': [1, 2, 840, 10003, 5, 103]}
oids['Z3950']['RECSYN']['GRS0'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 104]), 'val': [1, 2, 840, 10003, 5, 104]}
oids['Z3950']['RECSYN']['GRS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 105]), 'val': [1, 2, 840, 10003, 5, 105]}
oids['Z3950']['RECSYN']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 106]), 'val': [1, 2, 840, 10003, 5, 106]}
oids['Z3950']['RECSYN']['FRAGMENT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 107]), 'val': [1, 2, 840, 10003, 5, 107]}
oids['Z3950']['RECSYN']['MIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109]), 'val': [1, 2, 840, 10003, 5, 109]}
oids['Z3950']['RECSYN']['MIME']['PDF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 1]), 'val': [1, 2, 840, 10003, 5, 109, 1]}
oids['Z3950']['RECSYN']['MIME']['POSTSCRIPT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 2]), 'val': [1, 2, 840, 10003, 5, 109, 2]}
oids['Z3950']['RECSYN']['MIME']['HTML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 3]), 'val': [1, 2, 840, 10003, 5, 109, 3]}
oids['Z3950']['RECSYN']['MIME']['TIFF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 4]), 'val': [1, 2, 840, 10003, 5, 109, 4]}
oids['Z3950']['RECSYN']['MIME']['GIF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 5]), 'val': [1, 2, 840, 10003, 5, 109, 5]}
oids['Z3950']['RECSYN']['MIME']['JPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 6]), 'val': [1, 2, 840, 10003, 5, 109, 6]}
oids['Z3950']['RECSYN']['MIME']['PNG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 7]), 'val': [1, 2, 840, 10003, 5, 109, 7]}
oids['Z3950']['RECSYN']['MIME']['MPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 8]), 'val': [1, 2, 840, 10003, 5, 109, 8]}
oids['Z3950']['RECSYN']['MIME']['SGML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 9]), 'val': [1, 2, 840, 10003, 5, 109, 9]}
oids['Z3950']['RECSYN']['MIME']['XML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 10]), 'val': [1, 2, 840, 10003, 5, 109, 10]}
oids['Z3950']['RECSYN']['ZMIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110]), 'val': [1, 2, 840, 10003, 5, 110]}
oids['Z3950']['RECSYN']['ZMIME']['TIFFB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 1]), 'val': [1, 2, 840, 10003, 5, 110, 1]}
oids['Z3950']['RECSYN']['ZMIME']['WAV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 2]), 'val': [1, 2, 840, 10003, 5, 110, 2]}
oids['Z3950']['RECSYN']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 111]), 'val': [1, 2, 840, 10003, 5, 111]}
# Resource report formats (arc 7), access control formats (arc 8),
# extended services (arc 9), user-information formats (arc 10).
oids['Z3950']['RRF']['RESOURCE1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 1]), 'val': [1, 2, 840, 10003, 7, 1]}
oids['Z3950']['RRF']['RESOURCE2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 2]), 'val': [1, 2, 840, 10003, 7, 2]}
oids['Z3950']['ACCESS']['PROMPT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 1]), 'val': [1, 2, 840, 10003, 8, 1]}
oids['Z3950']['ACCESS']['DES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 2]), 'val': [1, 2, 840, 10003, 8, 2]}
oids['Z3950']['ACCESS']['KRB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 3]), 'val': [1, 2, 840, 10003, 8, 3]}
oids['Z3950']['ES']['PERSISTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 1]), 'val': [1, 2, 840, 10003, 9, 1]}
oids['Z3950']['ES']['PERSISTQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 2]), 'val': [1, 2, 840, 10003, 9, 2]}
oids['Z3950']['ES']['PERIODQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 3]), 'val': [1, 2, 840, 10003, 9, 3]}
oids['Z3950']['ES']['ITEMORDER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 4]), 'val': [1, 2, 840, 10003, 9, 4]}
oids['Z3950']['ES']['DBUPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5]), 'val': [1, 2, 840, 10003, 9, 5]}
oids['Z3950']['ES']['DBUPDATE']['REV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1]}
oids['Z3950']['ES']['DBUPDATE']['REV']['1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1, 1]}
oids['Z3950']['ES']['EXPORTSPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 6]), 'val': [1, 2, 840, 10003, 9, 6]}
oids['Z3950']['ES']['EXPORTINV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 7]), 'val': [1, 2, 840, 10003, 9, 7]}
oids['Z3950']['USR']['SEARCHRES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1]), 'val': [1, 2, 840, 10003, 10, 1]}
oids['Z3950']['USR']['CHARSETNEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 2]), 'val': [1, 2, 840, 10003, 10, 2]}
oids['Z3950']['USR']['INFO1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 3]), 'val': [1, 2, 840, 10003, 10, 3]}
oids['Z3950']['USR']['SEARCHTERMS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 4]), 'val': [1, 2, 840, 10003, 10, 4]}
oids['Z3950']['USR']['SEARCHTERMS2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 5]), 'val': [1, 2, 840, 10003, 10, 5]}
oids['Z3950']['USR']['DATETIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 6]), 'val': [1, 2, 840, 10003, 10, 6]}
oids['Z3950']['USR']['INSERTACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 9]), 'val': [1, 2, 840, 10003, 10, 9]}
oids['Z3950']['USR']['EDITACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 10]), 'val': [1, 2, 840, 10003, 10, 10]}
oids['Z3950']['USR']['AUTHFILE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 11]), 'val': [1, 2, 840, 10003, 10, 11]}
oids['Z3950']['USR']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000]), 'val': [1, 2, 840, 10003, 10, 1000]}
oids['Z3950']['USR']['PRIVATE']['OCLC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17]), 'val': [1, 2, 840, 10003, 10, 1000, 17]}
oids['Z3950']['USR']['PRIVATE']['OCLC']['INFO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1]), 'val': [1, 2, 840, 10003, 10, 1000, 17, 1]}
# Element specs (arc 11), variants (arc 12), schemas (arc 13),
# tag sets (arc 14), negotiation (arc 15), query types (arc 16).
oids['Z3950']['SPEC']['ESPEC1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 1]), 'val': [1, 2, 840, 10003, 11, 1]}
oids['Z3950']['SPEC']['ESPEC2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 2]), 'val': [1, 2, 840, 10003, 11, 2]}
oids['Z3950']['SPEC']['ESPECQ'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 3]), 'val': [1, 2, 840, 10003, 11, 3]}
oids['Z3950']['VAR']['VARIANT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12, 1]), 'val': [1, 2, 840, 10003, 12, 1]}
oids['Z3950']['SCHEMA']['WAIS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 2]), 'val': [1, 2, 840, 10003, 13, 2]}
oids['Z3950']['SCHEMA']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 3]), 'val': [1, 2, 840, 10003, 13, 3]}
oids['Z3950']['SCHEMA']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 4]), 'val': [1, 2, 840, 10003, 13, 4]}
oids['Z3950']['SCHEMA']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 5]), 'val': [1, 2, 840, 10003, 13, 5]}
oids['Z3950']['SCHEMA']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 6]), 'val': [1, 2, 840, 10003, 13, 6]}
oids['Z3950']['SCHEMA']['HOLDINGS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7]), 'val': [1, 2, 840, 10003, 13, 7]}
oids['Z3950']['SCHEMA']['HOLDINGS']['11'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 1]), 'val': [1, 2, 840, 10003, 13, 7, 1]}
oids['Z3950']['SCHEMA']['HOLDINGS']['12'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 2]), 'val': [1, 2, 840, 10003, 13, 7, 2]}
oids['Z3950']['SCHEMA']['HOLDINGS']['14'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 4]), 'val': [1, 2, 840, 10003, 13, 7, 4]}
# NOTE(review): ZTHES/INSERT/EDIT below all reuse the WAIS arc
# [..., 13, 1]; this looks like a copy/paste slip in the original table --
# confirm against the Z39.50 schema OID registry before relying on them.
oids['Z3950']['SCHEMA']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['INSERT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['EDIT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['TAGSET']['M'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 1]), 'val': [1, 2, 840, 10003, 14, 1]}
oids['Z3950']['TAGSET']['G'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 2]), 'val': [1, 2, 840, 10003, 14, 2]}
oids['Z3950']['TAGSET']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 3]), 'val': [1, 2, 840, 10003, 14, 3]}
oids['Z3950']['TAGSET']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 4]), 'val': [1, 2, 840, 10003, 14, 4]}
oids['Z3950']['TAGSET']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 5]), 'val': [1, 2, 840, 10003, 14, 5]}
oids['Z3950']['TAGSET']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 6]), 'val': [1, 2, 840, 10003, 14, 6]}
oids['Z3950']['TAGSET']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 7]), 'val': [1, 2, 840, 10003, 14, 7]}
oids['Z3950']['TAGSET']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 8]), 'val': [1, 2, 840, 10003, 14, 8]}
oids['Z3950']['NEG']['CHARSET2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1]), 'val': [1, 2, 840, 10003, 15, 1]}
oids['Z3950']['NEG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 2]), 'val': [1, 2, 840, 10003, 15, 2]}
oids['Z3950']['NEG']['CHARSET3'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 3]), 'val': [1, 2, 840, 10003, 15, 3]}
oids['Z3950']['NEG']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000]), 'val': [1, 2, 840, 10003, 15, 1000]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81]), 'val': [1, 2, 840, 10003, 15, 1000, 81]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA']['CHARSETNAME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1]), 'val': [1, 2, 840, 10003, 15, 1000, 81, 1]}
oids['Z3950']['QUERY']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 1]), 'val': [1, 2, 840, 10003, 16, 1]}
oids['Z3950']['QUERY']['CQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 2]), 'val': [1, 2, 840, 10003, 16, 2]}
# ISO 10646 (Unicode) transfer-syntax OIDs.
oids['UNICODE'] = {'oid': asn1.OidVal([1, 0, 10646]), 'val': [1, 0, 10646]}
oids['UNICODE']['PART1'] = {'oid': asn1.OidVal([1, 0, 10646, 1]), 'val': [1, 0, 10646, 1]}
oids['UNICODE']['PART1']['XFERSYN'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0]), 'val': [1, 0, 10646, 1, 0]}
oids['UNICODE']['PART1']['XFERSYN']['UCS2'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 2]), 'val': [1, 0, 10646, 1, 0, 2]}
oids['UNICODE']['PART1']['XFERSYN']['UCS4'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 4]), 'val': [1, 0, 10646, 1, 0, 4]}
oids['UNICODE']['PART1']['XFERSYN']['UTF16'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 5]), 'val': [1, 0, 10646, 1, 0, 5]}
oids['UNICODE']['PART1']['XFERSYN']['UTF8'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 8]), 'val': [1, 0, 10646, 1, 0, 8]}
# Flat module-level aliases for the same OIDs, for direct import by other
# modules: for each name, NAME is the plain list-of-arcs form and NAME_ov
# is the corresponding asn1.OidVal wrapper.
UNICODE = [1, 0, 10646]
UNICODE_ov = asn1.OidVal([1, 0, 10646])
UNICODE_PART1 = [1, 0, 10646, 1]
UNICODE_PART1_ov = asn1.OidVal([1, 0, 10646, 1])
UNICODE_PART1_XFERSYN = [1, 0, 10646, 1, 0]
UNICODE_PART1_XFERSYN_ov = asn1.OidVal([1, 0, 10646, 1, 0])
UNICODE_PART1_XFERSYN_UCS2 = [1, 0, 10646, 1, 0, 2]
UNICODE_PART1_XFERSYN_UCS2_ov = asn1.OidVal([1, 0, 10646, 1, 0, 2])
UNICODE_PART1_XFERSYN_UCS4 = [1, 0, 10646, 1, 0, 4]
UNICODE_PART1_XFERSYN_UCS4_ov = asn1.OidVal([1, 0, 10646, 1, 0, 4])
UNICODE_PART1_XFERSYN_UTF16 = [1, 0, 10646, 1, 0, 5]
UNICODE_PART1_XFERSYN_UTF16_ov = asn1.OidVal([1, 0, 10646, 1, 0, 5])
UNICODE_PART1_XFERSYN_UTF8 = [1, 0, 10646, 1, 0, 8]
UNICODE_PART1_XFERSYN_UTF8_ov = asn1.OidVal([1, 0, 10646, 1, 0, 8])
Z3950 = [1, 2, 840, 10003]
Z3950_ov = asn1.OidVal([1, 2, 840, 10003])
Z3950_ACCESS = [1, 2, 840, 10003, 8]
Z3950_ACCESS_ov = asn1.OidVal([1, 2, 840, 10003, 8])
Z3950_ACCESS_DES1 = [1, 2, 840, 10003, 8, 2]
Z3950_ACCESS_DES1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 2])
Z3950_ACCESS_KRB1 = [1, 2, 840, 10003, 8, 3]
Z3950_ACCESS_KRB1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 3])
Z3950_ACCESS_PROMPT1 = [1, 2, 840, 10003, 8, 1]
Z3950_ACCESS_PROMPT1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 1])
Z3950_ATTRS = [1, 2, 840, 10003, 3]
Z3950_ATTRS_ov = asn1.OidVal([1, 2, 840, 10003, 3])
Z3950_ATTRS_BIB1 = [1, 2, 840, 10003, 3, 1]
Z3950_ATTRS_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 1])
Z3950_ATTRS_BIB2 = [1, 2, 840, 10003, 3, 18]
Z3950_ATTRS_BIB2_ov = asn1.OidVal([1, 2, 840, 10003, 3, 18])
Z3950_ATTRS_CCL1 = [1, 2, 840, 10003, 3, 4]
Z3950_ATTRS_CCL1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 4])
Z3950_ATTRS_CIMI1 = [1, 2, 840, 10003, 3, 8]
Z3950_ATTRS_CIMI1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 8])
Z3950_ATTRS_COLLECTIONS1 = [1, 2, 840, 10003, 3, 7]
Z3950_ATTRS_COLLECTIONS1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 7])
Z3950_ATTRS_DAN1 = [1, 2, 840, 10003, 3, 15]
Z3950_ATTRS_DAN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 15])
Z3950_ATTRS_EXP1 = [1, 2, 840, 10003, 3, 2]
Z3950_ATTRS_EXP1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 2])
Z3950_ATTRS_EXT1 = [1, 2, 840, 10003, 3, 3]
Z3950_ATTRS_EXT1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 3])
Z3950_ATTRS_FIN1 = [1, 2, 840, 10003, 3, 14]
Z3950_ATTRS_FIN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 14])
Z3950_ATTRS_GEO = [1, 2, 840, 10003, 3, 9]
Z3950_ATTRS_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 3, 9])
Z3950_ATTRS_GILS = [1, 2, 840, 10003, 3, 5]
Z3950_ATTRS_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 5])
Z3950_ATTRS_HOLD = [1, 2, 840, 10003, 3, 16]
Z3950_ATTRS_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 3, 16])
Z3950_ATTRS_MARC = [1, 2, 840, 10003, 3, 17]
Z3950_ATTRS_MARC_ov = asn1.OidVal([1, 2, 840, 10003, 3, 17])
Z3950_ATTRS_STAS = [1, 2, 840, 10003, 3, 6]
Z3950_ATTRS_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 6])
Z3950_ATTRS_UTIL = [1, 2, 840, 10003, 3, 11]
Z3950_ATTRS_UTIL_ov = asn1.OidVal([1, 2, 840, 10003, 3, 11])
Z3950_ATTRS_XD1 = [1, 2, 840, 10003, 3, 12]
Z3950_ATTRS_XD1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 12])
Z3950_ATTRS_ZBIG = [1, 2, 840, 10003, 3, 10]
Z3950_ATTRS_ZBIG_ov = asn1.OidVal([1, 2, 840, 10003, 3, 10])
Z3950_ATTRS_ZEEREX = [1, 2, 840, 10003, 3, 19]
Z3950_ATTRS_ZEEREX_ov = asn1.OidVal([1, 2, 840, 10003, 3, 19])
Z3950_ATTRS_ZTHES = [1, 2, 840, 10003, 3, 13]
Z3950_ATTRS_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 3, 13])
Z3950_DIAG = [1, 2, 840, 10003, 4]
Z3950_DIAG_ov = asn1.OidVal([1, 2, 840, 10003, 4])
Z3950_DIAG_BIB1 = [1, 2, 840, 10003, 4, 1]
Z3950_DIAG_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 1])
Z3950_DIAG_DIAG1 = [1, 2, 840, 10003, 4, 2]
Z3950_DIAG_DIAG1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 2])
Z3950_DIAG_ES = [1, 2, 840, 10003, 4, 3]
Z3950_DIAG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 4, 3])
Z3950_DIAG_GENERAL = [1, 2, 840, 10003, 4, 4]
Z3950_DIAG_GENERAL_ov = asn1.OidVal([1, 2, 840, 10003, 4, 4])
Z3950_ES = [1, 2, 840, 10003, 9]
Z3950_ES_ov = asn1.OidVal([1, 2, 840, 10003, 9])
Z3950_ES_DBUPDATE = [1, 2, 840, 10003, 9, 5]
Z3950_ES_DBUPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5])
Z3950_ES_DBUPDATE_REV = [1, 2, 840, 10003, 9, 5, 1]
Z3950_ES_DBUPDATE_REV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1])
Z3950_ES_DBUPDATE_REV_1 = [1, 2, 840, 10003, 9, 5, 1, 1]
Z3950_ES_DBUPDATE_REV_1_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1])
Z3950_ES_EXPORTINV = [1, 2, 840, 10003, 9, 7]
Z3950_ES_EXPORTINV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 7])
Z3950_ES_EXPORTSPEC = [1, 2, 840, 10003, 9, 6]
Z3950_ES_EXPORTSPEC_ov = asn1.OidVal([1, 2, 840, 10003, 9, 6])
Z3950_ES_ITEMORDER = [1, 2, 840, 10003, 9, 4]
Z3950_ES_ITEMORDER_ov = asn1.OidVal([1, 2, 840, 10003, 9, 4])
Z3950_ES_PERIODQRY = [1, 2, 840, 10003, 9, 3]
Z3950_ES_PERIODQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 3])
Z3950_ES_PERSISTQRY = [1, 2, 840, 10003, 9, 2]
Z3950_ES_PERSISTQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 2])
Z3950_ES_PERSISTRS = [1, 2, 840, 10003, 9, 1]
Z3950_ES_PERSISTRS_ov = asn1.OidVal([1, 2, 840, 10003, 9, 1])
Z3950_NEG = [1, 2, 840, 10003, 15]
Z3950_NEG_ov = asn1.OidVal([1, 2, 840, 10003, 15])
Z3950_NEG_CHARSET2 = [1, 2, 840, 10003, 15, 1]
Z3950_NEG_CHARSET2_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1])
Z3950_NEG_CHARSET3 = [1, 2, 840, 10003, 15, 3]
Z3950_NEG_CHARSET3_ov = asn1.OidVal([1, 2, 840, 10003, 15, 3])
Z3950_NEG_ES = [1, 2, 840, 10003, 15, 2]
Z3950_NEG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 15, 2])
Z3950_NEG_PRIVATE = [1, 2, 840, 10003, 15, 1000]
Z3950_NEG_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000])
Z3950_NEG_PRIVATE_INDEXDATA = [1, 2, 840, 10003, 15, 1000, 81]
Z3950_NEG_PRIVATE_INDEXDATA_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81])
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME = [1, 2, 840, 10003, 15, 1000, 81, 1]
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1])
Z3950_QUERY = [1, 2, 840, 10003, 16]
Z3950_QUERY_ov = asn1.OidVal([1, 2, 840, 10003, 16])
Z3950_QUERY_CQL = [1, 2, 840, 10003, 16, 2]
Z3950_QUERY_CQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 2])
Z3950_QUERY_SQL = [1, 2, 840, 10003, 16, 1]
Z3950_QUERY_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 1])
Z3950_RECSYN = [1, 2, 840, 10003, 5]
Z3950_RECSYN_ov = asn1.OidVal([1, 2, 840, 10003, 5])
Z3950_RECSYN_AUSMARC = [1, 2, 840, 10003, 5, 20]
Z3950_RECSYN_AUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 20])
Z3950_RECSYN_CANMARC = [1, 2, 840, 10003, 5, 17]
Z3950_RECSYN_CANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 17])
Z3950_RECSYN_CATMARC = [1, 2, 840, 10003, 5, 22]
Z3950_RECSYN_CATMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 22])
Z3950_RECSYN_CCF = [1, 2, 840, 10003, 5, 3]
Z3950_RECSYN_CCF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 3])
Z3950_RECSYN_COMARC = [1, 2, 840, 10003, 5, 33]
Z3950_RECSYN_COMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 33])
Z3950_RECSYN_DANMARC = [1, 2, 840, 10003, 5, 14]
Z3950_RECSYN_DANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 14])
Z3950_RECSYN_ES = [1, 2, 840, 10003, 5, 106]
Z3950_RECSYN_ES_ov = asn1.OidVal([1, 2, 840, 10003, 5, 106])
Z3950_RECSYN_EXPLAIN = [1, 2, 840, 10003, 5, 100]
Z3950_RECSYN_EXPLAIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 100])
Z3950_RECSYN_FINMARC = [1, 2, 840, 10003, 5, 15]
Z3950_RECSYN_FINMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 15])
Z3950_RECSYN_FINMARC2000 = [1, 2, 840, 10003, 5, 31]
Z3950_RECSYN_FINMARC2000_ov = asn1.OidVal([1, 2, 840, 10003, 5, 31])
Z3950_RECSYN_FRAGMENT = [1, 2, 840, 10003, 5, 107]
Z3950_RECSYN_FRAGMENT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 107])
Z3950_RECSYN_GRS0 = [1, 2, 840, 10003, 5, 104]
Z3950_RECSYN_GRS0_ov = asn1.OidVal([1, 2, 840, 10003, 5, 104])
Z3950_RECSYN_GRS1 = [1, 2, 840, 10003, 5, 105]
Z3950_RECSYN_GRS1_ov = asn1.OidVal([1, 2, 840, 10003, 5, 105])
Z3950_RECSYN_HUNMARC = [1, 2, 840, 10003, 5, 29]
Z3950_RECSYN_HUNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 29])
Z3950_RECSYN_IBERMARC = [1, 2, 840, 10003, 5, 21]
Z3950_RECSYN_IBERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 21])
Z3950_RECSYN_INTERMARC = [1, 2, 840, 10003, 5, 2]
Z3950_RECSYN_INTERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 2])
Z3950_RECSYN_ISDSMARC = [1, 2, 840, 10003, 5, 27]
Z3950_RECSYN_ISDSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 27])
Z3950_RECSYN_JPMARC = [1, 2, 840, 10003, 5, 24]
Z3950_RECSYN_JPMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 24])
Z3950_RECSYN_LIBRISMARC = [1, 2, 840, 10003, 5, 13]
Z3950_RECSYN_LIBRISMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 13])
Z3950_RECSYN_MAB = [1, 2, 840, 10003, 5, 16]
Z3950_RECSYN_MAB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 16])
Z3950_RECSYN_MALMARC = [1, 2, 840, 10003, 5, 23]
Z3950_RECSYN_MALMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 23])
Z3950_RECSYN_MARC21FIN = [1, 2, 840, 10003, 5, 32]
Z3950_RECSYN_MARC21FIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 32])
Z3950_RECSYN_MIME = [1, 2, 840, 10003, 5, 109]
Z3950_RECSYN_MIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109])
Z3950_RECSYN_MIME_GIF = [1, 2, 840, 10003, 5, 109, 5]
Z3950_RECSYN_MIME_GIF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 5])
Z3950_RECSYN_MIME_HTML = [1, 2, 840, 10003, 5, 109, 3]
Z3950_RECSYN_MIME_HTML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 3])
Z3950_RECSYN_MIME_JPEG = [1, 2, 840, 10003, 5, 109, 6]
Z3950_RECSYN_MIME_JPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 6])
Z3950_RECSYN_MIME_MPEG = [1, 2, 840, 10003, 5, 109, 8]
Z3950_RECSYN_MIME_MPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 8])
Z3950_RECSYN_MIME_PDF = [1, 2, 840, 10003, 5, 109, 1]
Z3950_RECSYN_MIME_PDF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 1])
Z3950_RECSYN_MIME_PNG = [1, 2, 840, 10003, 5, 109, 7]
Z3950_RECSYN_MIME_PNG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 7])
Z3950_RECSYN_MIME_POSTSCRIPT = [1, 2, 840, 10003, 5, 109, 2]
Z3950_RECSYN_MIME_POSTSCRIPT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 2])
Z3950_RECSYN_MIME_SGML = [1, 2, 840, 10003, 5, 109, 9]
Z3950_RECSYN_MIME_SGML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 9])
Z3950_RECSYN_MIME_TIFF = [1, 2, 840, 10003, 5, 109, 4]
Z3950_RECSYN_MIME_TIFF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 4])
Z3950_RECSYN_MIME_XML = [1, 2, 840, 10003, 5, 109, 10]
Z3950_RECSYN_MIME_XML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 10])
Z3950_RECSYN_NACSISCATP = [1, 2, 840, 10003, 5, 30]
Z3950_RECSYN_NACSISCATP_ov = asn1.OidVal([1, 2, 840, 10003, 5, 30])
Z3950_RECSYN_NORMARC = [1, 2, 840, 10003, 5, 12]
Z3950_RECSYN_NORMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 12])
Z3950_RECSYN_OPAC = [1, 2, 840, 10003, 5, 102]
Z3950_RECSYN_OPAC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 102])
Z3950_RECSYN_PICAMARC = [1, 2, 840, 10003, 5, 19]
Z3950_RECSYN_PICAMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 19])
Z3950_RECSYN_RUSMARC = [1, 2, 840, 10003, 5, 28]
Z3950_RECSYN_RUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 28])
Z3950_RECSYN_SBNMARC = [1, 2, 840, 10003, 5, 18]
Z3950_RECSYN_SBNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 18])
Z3950_RECSYN_SIGLEMARC = [1, 2, 840, 10003, 5, 26]
Z3950_RECSYN_SIGLEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 26])
Z3950_RECSYN_SQL = [1, 2, 840, 10003, 5, 111]
Z3950_RECSYN_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 5, 111])
Z3950_RECSYN_SUMMARY = [1, 2, 840, 10003, 5, 103]
Z3950_RECSYN_SUMMARY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 103])
Z3950_RECSYN_SUTRS = [1, 2, 840, 10003, 5, 101]
Z3950_RECSYN_SUTRS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 101])
Z3950_RECSYN_SWEMARC = [1, 2, 840, 10003, 5, 25]
Z3950_RECSYN_SWEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 25])
Z3950_RECSYN_UKMARC = [1, 2, 840, 10003, 5, 11]
Z3950_RECSYN_UKMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 11])
Z3950_RECSYN_UNIMARC = [1, 2, 840, 10003, 5, 1]
Z3950_RECSYN_UNIMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 1])
Z3950_RECSYN_USMARC = [1, 2, 840, 10003, 5, 10]
Z3950_RECSYN_USMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10])
Z3950_RECSYN_USMARC_AUTH = [1, 2, 840, 10003, 5, 10, 2]
Z3950_RECSYN_USMARC_AUTH_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 2])
Z3950_RECSYN_USMARC_BIBLIO = [1, 2, 840, 10003, 5, 10, 1]
Z3950_RECSYN_USMARC_BIBLIO_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 1])
Z3950_RECSYN_USMARC_CLASS = [1, 2, 840, 10003, 5, 10, 5]
Z3950_RECSYN_USMARC_CLASS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 5])
Z3950_RECSYN_USMARC_COMMUNITY = [1, 2, 840, 10003, 5, 10, 4]
Z3950_RECSYN_USMARC_COMMUNITY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 4])
Z3950_RECSYN_USMARC_HOLD = [1, 2, 840, 10003, 5, 10, 3]
Z3950_RECSYN_USMARC_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 3])
Z3950_RECSYN_ZMIME = [1, 2, 840, 10003, 5, 110]
Z3950_RECSYN_ZMIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110])
Z3950_RECSYN_ZMIME_TIFFB = [1, 2, 840, 10003, 5, 110, 1]
Z3950_RECSYN_ZMIME_TIFFB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 1])
Z3950_RECSYN_ZMIME_WAV = [1, 2, 840, 10003, 5, 110, 2]
Z3950_RECSYN_ZMIME_WAV_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 2])
Z3950_RRF = [1, 2, 840, 10003, 7]
Z3950_RRF_ov = asn1.OidVal([1, 2, 840, 10003, 7])
Z3950_RRF_RESOURCE1 = [1, 2, 840, 10003, 7, 1]
Z3950_RRF_RESOURCE1_ov = asn1.OidVal([1, 2, 840, 10003, 7, 1])
Z3950_RRF_RESOURCE2 = [1, 2, 840, 10003, 7, 2]
Z3950_RRF_RESOURCE2_ov = asn1.OidVal([1, 2, 840, 10003, 7, 2])
Z3950_SCHEMA = [1, 2, 840, 10003, 13]
Z3950_SCHEMA_ov = asn1.OidVal([1, 2, 840, 10003, 13])
Z3950_SCHEMA_CIMI = [1, 2, 840, 10003, 13, 5]
Z3950_SCHEMA_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 13, 5])
Z3950_SCHEMA_COLLECTIONS = [1, 2, 840, 10003, 13, 3]
Z3950_SCHEMA_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 3])
# NOTE(review): EDIT, INSERT, WAIS and ZTHES below all share the arc
# [..., 13, 1]; this mirrors the duplication in the nested `oids` table
# above and looks like a copy/paste slip -- confirm against the Z39.50
# schema OID registry before relying on these names.
Z3950_SCHEMA_EDIT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_EDIT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_GEO = [1, 2, 840, 10003, 13, 4]
Z3950_SCHEMA_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 13, 4])
Z3950_SCHEMA_GILS = [1, 2, 840, 10003, 13, 2]
Z3950_SCHEMA_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 2])
Z3950_SCHEMA_HOLDINGS = [1, 2, 840, 10003, 13, 7]
Z3950_SCHEMA_HOLDINGS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7])
Z3950_SCHEMA_HOLDINGS_11 = [1, 2, 840, 10003, 13, 7, 1]
Z3950_SCHEMA_HOLDINGS_11_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 1])
Z3950_SCHEMA_HOLDINGS_12 = [1, 2, 840, 10003, 13, 7, 2]
Z3950_SCHEMA_HOLDINGS_12_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 2])
Z3950_SCHEMA_HOLDINGS_14 = [1, 2, 840, 10003, 13, 7, 4]
Z3950_SCHEMA_HOLDINGS_14_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 4])
Z3950_SCHEMA_INSERT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_INSERT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_UPDATE = [1, 2, 840, 10003, 13, 6]
Z3950_SCHEMA_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 13, 6])
Z3950_SCHEMA_WAIS = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_WAIS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_ZTHES = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SPEC = [1, 2, 840, 10003, 11]
Z3950_SPEC_ov = asn1.OidVal([1, 2, 840, 10003, 11])
Z3950_SPEC_ESPEC1 = [1, 2, 840, 10003, 11, 1]
Z3950_SPEC_ESPEC1_ov = asn1.OidVal([1, 2, 840, 10003, 11, 1])
Z3950_SPEC_ESPEC2 = [1, 2, 840, 10003, 11, 2]
Z3950_SPEC_ESPEC2_ov = asn1.OidVal([1, 2, 840, 10003, 11, 2])
Z3950_SPEC_ESPECQ = [1, 2, 840, 10003, 11, 3]
Z3950_SPEC_ESPECQ_ov = asn1.OidVal([1, 2, 840, 10003, 11, 3])
Z3950_TAGSET = [1, 2, 840, 10003, 14]
Z3950_TAGSET_ov = asn1.OidVal([1, 2, 840, 10003, 14])
Z3950_TAGSET_CIMI = [1, 2, 840, 10003, 14, 6]
Z3950_TAGSET_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 14, 6])
Z3950_TAGSET_COLLECTIONS = [1, 2, 840, 10003, 14, 5]
Z3950_TAGSET_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 5])
Z3950_TAGSET_G = [1, 2, 840, 10003, 14, 2]
Z3950_TAGSET_G_ov = asn1.OidVal([1, 2, 840, 10003, 14, 2])
Z3950_TAGSET_GILS = [1, 2, 840, 10003, 14, 4]
Z3950_TAGSET_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 4])
Z3950_TAGSET_M = [1, 2, 840, 10003, 14, 1]
Z3950_TAGSET_M_ov = asn1.OidVal([1, 2, 840, 10003, 14, 1])
Z3950_TAGSET_STAS = [1, 2, 840, 10003, 14, 3]
Z3950_TAGSET_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 3])
Z3950_TAGSET_UPDATE = [1, 2, 840, 10003, 14, 7]
Z3950_TAGSET_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 14, 7])
Z3950_TAGSET_ZTHES = [1, 2, 840, 10003, 14, 8]
Z3950_TAGSET_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 14, 8])
Z3950_TRANSFER = [1, 2, 840, 10003, 6]
Z3950_TRANSFER_ov = asn1.OidVal([1, 2, 840, 10003, 6])
Z3950_USR = [1, 2, 840, 10003, 10]
Z3950_USR_ov = asn1.OidVal([1, 2, 840, 10003, 10])
Z3950_USR_AUTHFILE = [1, 2, 840, 10003, 10, 11]
Z3950_USR_AUTHFILE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 11])
Z3950_USR_CHARSETNEG = [1, 2, 840, 10003, 10, 2]
Z3950_USR_CHARSETNEG_ov = asn1.OidVal([1, 2, 840, 10003, 10, 2])
Z3950_USR_DATETIME = [1, 2, 840, 10003, 10, 6]
Z3950_USR_DATETIME_ov = asn1.OidVal([1, 2, 840, 10003, 10, 6])
Z3950_USR_EDITACTIONQUAL = [1, 2, 840, 10003, 10, 10]
Z3950_USR_EDITACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 10])
Z3950_USR_INFO1 = [1, 2, 840, 10003, 10, 3]
Z3950_USR_INFO1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 3])
Z3950_USR_INSERTACTIONQUAL = [1, 2, 840, 10003, 10, 9]
Z3950_USR_INSERTACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 9])
Z3950_USR_PRIVATE = [1, 2, 840, 10003, 10, 1000]
Z3950_USR_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000])
Z3950_USR_PRIVATE_OCLC = [1, 2, 840, 10003, 10, 1000, 17]
Z3950_USR_PRIVATE_OCLC_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17])
Z3950_USR_PRIVATE_OCLC_INFO = [1, 2, 840, 10003, 10, 1000, 17, 1]
Z3950_USR_PRIVATE_OCLC_INFO_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1])
Z3950_USR_SEARCHRES1 = [1, 2, 840, 10003, 10, 1]
Z3950_USR_SEARCHRES1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1])
Z3950_USR_SEARCHTERMS1 = [1, 2, 840, 10003, 10, 4]
Z3950_USR_SEARCHTERMS1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 4])
Z3950_USR_SEARCHTERMS2 = [1, 2, 840, 10003, 10, 5]
Z3950_USR_SEARCHTERMS2_ov = asn1.OidVal([1, 2, 840, 10003, 10, 5])
Z3950_VAR = [1, 2, 840, 10003, 12]
Z3950_VAR_ov = asn1.OidVal([1, 2, 840, 10003, 12])
Z3950_VAR_VARIANT1 = [1, 2, 840, 10003, 12, 1]
Z3950_VAR_VARIANT1_ov = asn1.OidVal([1, 2, 840, 10003, 12, 1])

View File

@ -1,260 +0,0 @@
#!/usr/local/bin/python2.3
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from PyZ3950 import z3950, oids,asn1
from PyZ3950.zdefs import make_attr
from types import IntType, StringType, ListType
from PyZ3950.CQLParser import CQLshlex
"""
Parser for PQF directly into RPN structure.
PQF docs: http://www.indexdata.dk/yaz/doc/tools.html
NB: This does not implement /everything/ in PQF, in particular: @attr 2=3 @and @attr 1=4 title @attr 1=1003 author (eg that 2 should be 3 for all subsequent clauses)
"""
class PQFParser:
    """Recursive-descent parser turning a PQF token stream into an RPN structure.

    Keeps a one-token lookahead: ``currentToken`` is the token being
    examined, ``nextToken`` the one just read from the lexer.
    """

    lexer = None          # token source; must provide get_token()
    currentToken = None   # token currently being processed
    nextToken = None      # one-token lookahead

    def __init__(self, l):
        self.lexer = l
        # Prime the lookahead so nextToken holds the first real token.
        self.fetch_token()

    def fetch_token(self):
        """ Read ahead one token """
        tok = self.lexer.get_token()
        self.currentToken = self.nextToken
        self.nextToken = tok

    def is_boolean(self):
        """Return 1 if the current token is a PQF boolean operator, else 0."""
        if (self.currentToken.lower() in ['@and', '@or', '@not', '@prox']):
            return 1
        else:
            return 0

    def defaultClause(self, t):
        # Assign a default clause: anywhere =
        clause = z3950.AttributesPlusTerm()
        # bib-1 use attribute 1016 (anywhere) with relation 3 (equal)
        attrs = [(oids.Z3950_ATTRS_BIB1, 1, 1016), (oids.Z3950_ATTRS_BIB1, 2, 3)]
        clause.attributes = [make_attr(*e) for e in attrs]
        clause.term = t
        return ('op', ('attrTerm', clause))

    # Grammar fns

    def query(self):
        """Parse a complete PQF query; return ('type_1', RPNQuery)."""
        set = self.top_set()
        qst = self.query_struct()
        # Pull in a (hopefully) null token
        self.fetch_token()
        if (self.currentToken):
            # Nope, unprocessed tokens remain
            raise ValueError
        rpnq = z3950.RPNQuery()
        if set:
            rpnq.attributeSet = set
        else:
            rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
        rpnq.rpn = qst
        return ('type_1', rpnq)

    def top_set(self):
        """Parse an optional leading '@attrset' clause; return its OID or None."""
        if (self.nextToken == '@attrset'):
            self.fetch_token()
            self.fetch_token()
            n = self.currentToken.upper()
            if (n[:14] == "1.2.840.10003."):
                # Raw dotted OID under the Z39.50 arc
                return asn1.OidVal(map(int, n.split('.')))
            return oids.oids['Z3950']['ATTRS'][n]['oid']
        else:
            return None

    def query_struct(self):
        """Parse one sub-query: attr clause, boolean, result set, or parens.

        This totally ignores the BNF, but does the 'right' thing.
        """
        self.fetch_token()
        if (self.currentToken == '@attr'):
            attrs = []
            while self.currentToken == '@attr':
                attrs.append(self.attr_spec())
                self.fetch_token()
            t = self.term()
            # Now we have attrs + term
            clause = z3950.AttributesPlusTerm()
            clause.attributes = [make_attr(*e) for e in attrs]
            clause.term = t
            return ('op', ('attrTerm', clause))
        elif (self.is_boolean()):
            # @operator query query
            return self.complex()
        elif (self.currentToken == '@set'):
            return self.result_set()
        elif (self.currentToken == "{"):
            # Parens
            s = self.query_struct()
            # FIX: '<>' is Python-2-only syntax; '!=' behaves identically
            if (self.nextToken != "}"):
                raise ValueError
            else:
                self.fetch_token()
            return s
        else:
            t = self.term()
            return self.defaultClause(t)

    def term(self):
        """Parse a (possibly '@term'-typed, possibly quoted) search term."""
        # Need to split to allow attrlist then @term
        type = 'general'
        if (self.currentToken == '@term'):
            self.fetch_token()
            type = self.currentToken.lower()
            types = {'general' : 'general', 'string' : 'characterString',
                     'numeric' : 'numeric', 'external' : 'external'}
            type = types[type]
            self.fetch_token()
        if (self.currentToken[0] == '"' and self.currentToken[-1] == '"'):
            term = self.currentToken[1:-1]
        else:
            term = self.currentToken
        return (type, term)

    def result_set(self):
        """Parse '@set NAME' into a resultSet operand."""
        self.fetch_token()
        return ('op', ('resultSet', self.currentToken))

    def attr_spec(self):
        """Parse one '@attr [set] type=value' spec into (set, type, value)."""
        # @attr is currentToken on entry
        self.fetch_token()
        if (self.currentToken.find('=') == -1):
            # attrset name, or a raw dotted OID
            set = self.currentToken
            if (set[:14] == "1.2.840.10003."):
                set = asn1.OidVal(map(int, set.split('.')))
            else:
                set = oids.oids['Z3950']['ATTRS'][set.upper()]['oid']
            self.fetch_token()
        else:
            set = None
        # May raise ValueError if there is no '='
        (atype, val) = self.currentToken.split('=')
        if (not atype.isdigit()):
            raise ValueError
        atype = int(atype)
        if (val.isdigit()):
            val = int(val)
        return (set, atype, val)

    def complex(self):
        """Parse a boolean operator and its two operand sub-queries."""
        op = z3950.RpnRpnOp()
        op.op = self.boolean()
        op.rpn1 = self.query_struct()
        op.rpn2 = self.query_struct()
        return ('rpnRpnOp', op)

    def boolean(self):
        """Parse the operator itself, including @prox parameters."""
        b = self.currentToken[1:]
        b = b.lower()
        if (b == 'prox'):
            self.fetch_token()
            exclusion = self.currentToken
            self.fetch_token()
            distance = self.currentToken
            self.fetch_token()
            # 'ordered' flag is consumed but never stored (as in the BNF note above)
            ordered = self.currentToken
            self.fetch_token()
            relation = self.currentToken
            self.fetch_token()
            which = self.currentToken
            self.fetch_token()
            unit = self.currentToken
            prox = z3950.ProximityOperator()
            if (not (relation.isdigit() and exclusion.isdigit() and
                     distance.isdigit() and unit.isdigit())):
                raise ValueError
            prox.relationType = int(relation)
            # BUG FIX: bool('0') is True for any non-empty string, so an
            # exclusion flag of 0 was always treated as true.  Convert the
            # digit string to int first.
            prox.exclusion = bool(int(exclusion))
            prox.distance = int(distance)
            if (which[0] == 'k'):
                prox.unit = ('known', int(unit))
            elif (which[0] == 'p'):
                prox.unit = ('private', int(unit))
            else:
                raise ValueError
            return (b, prox)
        elif b == 'not':
            return ('and-not', None)
        else:
            return (b, None)
def parse(q):
    """Tokenise the PQF string *q* and return the parsed RPN query."""
    stream = StringIO(q)
    shlexer = CQLshlex(stream)
    # Widen the word-character set so PQF punctuation (/=><() etc.) stays
    # inside tokens instead of splitting them.
    shlexer.wordchars += "!@#$%^&*-+[];,.?|~`:\\><=/'()"
    return PQFParser(shlexer).query()
def rpn2pqf(rpn):
    """Render an RPN query structure back into its PQF string form."""
    tag = rpn[0]
    payload = rpn[1]
    if tag == 'type_1':
        # Top level: optional @attrset prefix, then the query proper.
        if payload.attributeSet:
            prefix = '@attrset %s ' % ('.'.join(map(str, payload.attributeSet.lst)))
        else:
            prefix = ""
        return "%s%s" % (prefix, rpn2pqf(payload.rpn))
    if tag == 'rpnRpnOp':
        # Boolean node: operator token, then the two rendered operands.
        name = payload.op[0]
        if name in ['and', 'or']:
            parts = ['@', name, ' ']
        elif name == 'and-not':
            parts = ['@not ']
        else:
            parts = ['@prox']
            # XXX proximity parameters are not rendered
        parts.append(' ')
        parts.append(rpn2pqf(payload.rpn1))
        parts.append(' ')
        parts.append(rpn2pqf(payload.rpn2))
        return ''.join(parts)
    if tag == 'op':
        kind = payload[0]
        value = payload[1]
        if kind == 'attrTerm':
            parts = []
            for attr in value.attributes:
                if attr.attributeValue[0] == 'numeric':
                    rendered = str(attr.attributeValue[1])
                else:
                    rendered = attr.attributeValue[1].list[0][1]
                parts.append("@attr %i=%s " % (attr.attributeType, rendered))
            parts.append('"%s" ' % (value.term[1]))
            return ''.join(parts)
        elif kind == 'resultSet':
            return "@set %s" % (value)

View File

@ -1,754 +0,0 @@
#!/usr/bin/env python
# This file should be available from
# http://www.pobox.com/~asl2/software/PyZ3950/
# and is licensed under the X Consortium license:
# Copyright (c) 2001, Aaron S. Lav, asl2@pobox.com
# All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, provided that the above
# copyright notice(s) and this permission notice appear in all copies of
# the Software and that both the above copyright notice(s) and this
# permission notice appear in supporting documentation.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
# OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
# INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Except as contained in this notice, the name of a copyright holder
# shall not be used in advertising or otherwise to promote the sale, use
# or other dealings in this Software without prior written authorization
# of the copyright holder.
# Change history:
# 2002/05/23
# Fix for Python2 compatibility. Thanks to Douglas Bates <bates@stat.wisc.edu>
# Fix to support SUTRS (requires asn1 updates, too)
# 2002/05/28
# Make SUTRS printing a little more useful
# Correctly close connection when done
# Handle receiving diagnostics instead of records a little better
"""<p>PyZ3950 currently is capable of sending and receiving v2 or v3 PDUs
Initialize, Search, Present, Scan, Sort, Close, and Delete. For client
work, you probably want to use ZOOM, which should be in the same
distribution as this file, in zoom.py. The Server class in this file
implements a server, but could use some work. Both interoperate with
the <a href="http://www.indexdata.dk/yaz"> Yaz toolkit</a> and the
client interoperates with a variety of libraries. <p>
Useful resources:
<ul>
<li><a href="http://lcweb.loc.gov/z3950/agency/">
Library of Congress Z39.50 Maintenance Agency Page</a></li>
<li><a href="http://lcweb.loc.gov/z3950/agency/document.html">
Official Specification</a></li>
<li><a href="http://www.loc.gov/z3950/agency/clarify/">Clarifications</a></li>
</ul>
"""
from __future__ import nested_scopes
import getopt
import sys
import exceptions
import random
import socket
import string
import traceback
import codecs
from PyZ3950 import asn1
from PyZ3950 import zmarc
from PyZ3950.zdefs import *
# Module-level debug/trace switches (flipped by the command-line driver below).
out_encoding = None   # encoding used when printing unicode SUTRS records
trace_recv = 0        # dump raw received bytes as hex in Conn.readproc
trace_init = 0        # print init request/response PDUs
print_hex = 0         # dump encoded PDUs before sending in Client.transact
# NOTE(review): trace_charset is referenced below (Conn.set_codec, Server.init,
# Client.__init__) but is not assigned here -- presumably it arrives via
# 'from PyZ3950.zdefs import *'; confirm, otherwise it raises NameError.
class Z3950Error(Exception):
    """Base class for all errors raised by this module."""
    pass

# Note: following 3 exceptions are defaults, but can be changed by
# calling conn.set_exs  (sic: the method defined below is named set_exns)
class ConnectionError(Z3950Error): # TCP or other transport error
    pass

class ProtocolError(Z3950Error): # Unexpected message or badly formatted
    pass

class UnexpectedCloseError(ProtocolError):
    """Peer sent a Close PDU when some other response was expected."""
    pass
vers = '0.62'                      # module version string (reported nowhere visible here)
default_resultSetName = 'default'  # result-set name used when the caller gives none
DEFAULT_PORT = 2101                # TCP port used by both the demo server and client
Z3950_VERS = 3 # This is a global switch: do we support V3 at all?
def extract_recs (resp):
    """Pull the retrieval records out of a search/present response.

    Returns (fmtoid, list_of_record_payloads).  Raises ProtocolError if the
    response carries something other than retrieval records, is empty, or
    mixes record syntaxes.
    """
    (typ, recs) = resp.records
    # FIX: replaced Python-2-only '<>' with '!=' (identical behaviour)
    if typ != 'responseRecords':
        raise ProtocolError ("Bad records typ " + str (typ) + str (recs))
    if len (recs) == 0:
        raise ProtocolError ("No records")
    fmtoid = None
    extract = []
    for r in recs:
        (typ, data) = r.record
        if typ != 'retrievalRecord':
            raise ProtocolError ("Bad typ %s data %s" % (str (typ), str(data)))
        oid = data.direct_reference
        if fmtoid is None:
            # First record fixes the syntax for the whole batch
            fmtoid = oid
        elif fmtoid != oid:
            raise ProtocolError (
                "Differing OIDs %s %s" % (str (fmtoid), str (oid)))
        # Not, strictly speaking, an error.
        dat = data.encoding
        (typ, dat) = dat
        if oid == Z3950_RECSYN_USMARC_ov:
            if typ != 'octet-aligned':
                raise ProtocolError ("Weird record EXTERNAL MARC type: " + typ)
        extract.append (dat)
    return (fmtoid, extract)
def get_formatter (oid):
    """Return a print function appropriate for record-syntax *oid*."""
    def printer (x):
        # Fallback: just show the OID and the raw record representation
        print oid, repr (x)
    def print_marc (marc):
        print str (zmarc.MARC(marc))
    def print_sutrs (x):
        print "SUTRS:",
        if isinstance (x, type ('')):
            # Plain byte string: print as-is
            print x
        elif isinstance (x, type (u'')):
            # Unicode record: honour the user-selected output encoding
            if out_encoding == None:
                print repr (x)
            else:
                try:
                    print x.encode (out_encoding)
                except UnicodeError, u:
                    print "Cannot print %s in current encoding %s" % (
                        repr (x), out_encoding)
    if oid == Z3950_RECSYN_SUTRS_ov:
        return print_sutrs
    if oid == Z3950_RECSYN_USMARC_ov:
        return print_marc
    else:
        return printer
def disp_resp (resp):
    """Print every record in a response with a syntax-appropriate formatter."""
    try:
        (fmtoid, recs) = extract_recs (resp)
    except ProtocolError, val:
        print "Bad records", str (val)
    # NOTE(review): if extract_recs raised, fmtoid/recs are unbound here and
    # the code below raises NameError -- there is no early return; confirm
    # whether that was intended.
    formatter = get_formatter (fmtoid)
    for rec in recs:
        formatter (rec)
class Conn:
    """Common state and PDU framing shared by Client and Server.

    Owns the TCP socket plus an incremental BER decode context and an
    encode context for the APDU type.
    """
    rdsz = 65536  # socket read chunk size

    def __init__ (self, sock = None, ConnectionError = ConnectionError,
                  ProtocolError = ProtocolError, UnexpectedCloseError =
                  UnexpectedCloseError):
        self.set_exns (ConnectionError, ProtocolError, UnexpectedCloseError)
        if sock == None:
            # No socket supplied: create one (Client connects it later)
            self.sock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
        # Incremental decoder: feed raw bytes, pull out whole APDUs
        self.decode_ctx = asn1.IncrementalDecodeCtx (APDU)
        self.encode_ctx = asn1.Ctx ()

    def set_exns (self, conn, protocol, unexp_close):
        """Install the exception classes raised for the three error kinds."""
        self.ConnectionError = conn
        self.ProtocolError = protocol
        self.UnexpectedCloseError = unexp_close

    def set_codec (self, charset_name, charsets_in_records):
        """Configure the (negotiated) character set on both BER contexts."""
        self.charset_name = charset_name
        self.charsets_in_records = not not charsets_in_records # collapse None and 0
        if trace_charset:
            print "Setting up codec!", self.charset_name
        strip_bom = self.charset_name == 'utf-16'
        # XXX should create a new codec which wraps utf-16 but
        # strips the Byte Order Mark, or use stream codecs
        if self.charset_name <> None:
            self.encode_ctx.set_codec (asn1.GeneralString,
                                       codecs.lookup (self.charset_name),
                                       strip_bom)
            self.decode_ctx.set_codec (asn1.GeneralString,
                                       codecs.lookup (self.charset_name),
                                       strip_bom)
            if not charsets_in_records: # None or 0
                register_retrieval_record_oids(self.decode_ctx)
                register_retrieval_record_oids(self.encode_ctx)

    def readproc (self):
        """Read one chunk from the socket; raise ConnectionError on EOF/error."""
        if self.sock == None:
            raise self.ConnectionError ('disconnected')
        try:
            b = self.sock.recv (self.rdsz)
        except socket.error, val:
            self.sock = None
            raise self.ConnectionError ('socket', str (val))
        if len (b) == 0: # graceful close
            self.sock = None
            raise self.ConnectionError ('graceful close')
        if trace_recv:
            print map (lambda x: hex(ord(x)), b)
        return b

    def read_PDU (self):
        """Block until one complete APDU has been decoded, and return it."""
        while 1:
            # Serve any APDU already buffered before reading more bytes
            if self.decode_ctx.val_count () > 0:
                return self.decode_ctx.get_first_decoded ()
            try:
                b = self.readproc ()
                self.decode_ctx.feed (map (ord, b))
            except asn1.BERError, val:
                raise self.ProtocolError ('ASN1 BER', str(val))
class Server (Conn):
    """Minimal demonstration Z39.50 server.

    Answers searches with random-length result sets and serves canned
    SUTRS records; useful for interop/charset testing, not production.
    """
    test = 0  # when set, round-trip every outgoing PDU through the decoder

    def __init__ (self, sock):
        Conn.__init__ (self, sock)
        self.expecting_init = 1   # first PDU must be an initRequest
        self.done = 0
        self.result_sets = {}     # result-set name -> list of record keys
        self.charset_name = None

    def run (self):
        """Dispatch loop: read PDUs and hand them to fn_dict handlers."""
        while not self.done:
            (typ, val) = self.read_PDU ()
            fn = self.fn_dict.get (typ, None)
            if fn == None:
                raise self.ProtocolError ("Bad typ", typ + " " + str (val))
            if typ <> 'initRequest' and self.expecting_init:
                raise self.ProtocolError ("Init expected", typ)
            fn (self, val)

    def send (self, val):
        """BER-encode and transmit one APDU."""
        b = self.encode_ctx.encode (APDU, val)
        if self.test:
            print "Internal Testing"
            # a reminder not to leave this switched on by accident
            self.decode_ctx.feed (b)
            decoded = self.read_PDU ()
            assert (val== decoded)
        self.sock.send (b)

    def do_close (self, reason, info):
        """Send a Close PDU with the given reason code and text."""
        close = Close ()
        close.closeReason = reason
        close.diagnosticInformation = info
        self.send (('close', close))

    def close (self, parm):
        # Handler for an incoming close PDU: acknowledge and stop the loop
        self.done = 1
        self.do_close (0, 'Normal close')

    def search_child (self, query):
        # Stub "search engine": a random-length list stands in for hits
        return range (random.randint (2,10))

    def search (self, sreq):
        """Handle a searchRequest: fake a hit list, send searchResponse."""
        if sreq.replaceIndicator == 0 and self.result_sets.has_key (
            sreq.resultSetName):
            raise self.ProtocolError ("replaceIndicator 0")
        result = self.search_child (sreq.query)
        sresp = SearchResponse ()
        self.result_sets[sreq.resultSetName] = result
        sresp.resultCount = len (result)
        sresp.numberOfRecordsReturned = 0
        sresp.nextResultSetPosition = 1
        sresp.searchStatus = 1
        sresp.resultSetStatus = 0
        sresp.presentStatus = PresentStatus.get_num_from_name ('success')
        sresp.records = ('responseRecords', [])
        self.send (('searchResponse', sresp))

    def format_records (self, start, count, res_set, prefsyn):
        """Build NamePlusRecord entries [start, start+count) as SUTRS."""
        l = []
        for i in range (start - 1, start + count - 1):
            elt = res_set[i]
            elt_external = asn1.EXTERNAL ()
            # Always answers in SUTRS, ignoring prefsyn
            elt_external.direct_reference = Z3950_RECSYN_SUTRS_ov
            # Not only has this text been extensively translated, but
            # it also prefigures Z39.50's separation of Search and Present,
            # once rearranged a little.
            strings = [
                'seek, and ye shall find; ask, and it shall be given you',
                u"""Car quiconque demande re\u00e7oit, qui cherche trouve, et \u00e0 quit frappe on ouvrira""", # This (next) verse has non-ASCII characters
                u"\u0391\u03b9\u03c4\u03b5\u03b9\u03c4\u03b5, "
                u"\u03ba\u03b1\u03b9 \u03b4\u03bf\u03b8\u03b7\u03c3\u03b5\u03c4\u03b1\u03b9 "+
                u"\u03c5\u03bc\u03b9\u03bd; \u03b6\u03b7\u03c4\u03b5\u03b9\u03c4\u03b5 " +
                u"\u03ba\u03b1\u03b9 \u03b5\u03c5\u03c1\u03b7\u03c3\u03b5\u03c4\u03b5",
                u"\u05e8\u05d0\u05d4 \u05d6\u05d4 \u05de\u05e6\u05d0\u05ea\u05d9"]
            if self.charsets_in_records:
                encode_charset = self.charset_name
            else:
                encode_charset = 'ascii'
            def can_encode (s):
                # 1 if s survives encoding in the selected charset, else 0
                try:
                    s.encode (encode_charset)
                except UnicodeError:
                    return 0
                return 1
            if self.charset_name == None:
                candidate_strings = [strings[0]]
            else:
                candidate_strings = [s for s in strings if can_encode (s)]
            # Note: this code is for debugging/testing purposes. Usually,
            # language/content selection should not be made on the
            # basis of the selected charset, and a surrogate diagnostic
            # should be generated if the data cannot be encoded.
            text = random.choice (candidate_strings)
            add_str = " #%d charset %s cir %d" % (elt, encode_charset,
                                                  self.charsets_in_records)
            elt_external.encoding = ('single-ASN1-type', text + add_str)
            n = NamePlusRecord ()
            n.name = 'foo'
            n.record = ('retrievalRecord', elt_external)
            l.append (n)
        return l

    def present (self, preq):
        """Handle a presentRequest against a previously built result set."""
        presp = PresentResponse ()
        res_set = self.result_sets [preq.resultSetId]
        presp.numberOfRecordsReturned = preq.numberOfRecordsRequested
        presp.nextResultSetPosition = preq.resultSetStartPoint + \
                                      preq.numberOfRecordsRequested
        presp.presentStatus = 0
        presp.records = ('responseRecords',
                         self.format_records (preq.resultSetStartPoint,
                                              preq.numberOfRecordsRequested,
                                              res_set,
                                              preq.preferredRecordSyntax))
        self.send (('presentResponse', presp))

    def init (self, ireq):
        """Handle initRequest: negotiate version/charset, send initResponse."""
        if trace_init:
            print "Init received", ireq
        self.v3_flag = (ireq.protocolVersion ['version_3'] and
                        Z3950_VERS == 3)
        ir = InitializeResponse ()
        ir.protocolVersion = ProtocolVersion ()
        ir.protocolVersion ['version_1'] = 1
        ir.protocolVersion ['version_2'] = 1
        ir.protocolVersion ['version_3'] = self.v3_flag
        val = get_charset_negot (ireq)
        charset_name = None
        records_in_charsets = 0
        if val <> None:
            csreq = CharsetNegotReq ()
            csreq.unpack_proposal (val)
            def rand_choose (list_or_none):
                # Pick a random element, or None for an empty/missing list
                if list_or_none == None or len (list_or_none) == 0:
                    return None
                return random.choice (list_or_none)
            charset_name = rand_choose (csreq.charset_list)
            if charset_name <> None:
                try:
                    codecs.lookup (charset_name)
                except LookupError, l:
                    # Proposed charset unknown to us: decline it
                    charset_name = None
            csresp = CharsetNegotResp (
                charset_name,
                rand_choose (csreq.lang_list),
                csreq.records_in_charsets)
            records_in_charsets = csresp.records_in_charsets
            if trace_charset:
                print csreq, csresp
            set_charset_negot (ir, csresp.pack_negot_resp (), self.v3_flag)
        optionslist = ['search', 'present', 'delSet', 'scan','negotiation']
        ir.options = Options ()
        for o in optionslist:
            ir.options[o] = 1
        ir.preferredMessageSize = 0
        ir.exceptionalRecordSize = 0
        # z9350-2001 3.2.1.1.4, 0 means client should be prepared to accept
        # arbitrarily long messages.
        ir.implementationId = implementationId
        ir.implementationName = 'PyZ3950 Test server'
        ir.implementationVersion = impl_vers
        ir.result = 1
        if trace_charset or trace_init:
            print ir
        self.expecting_init = 0
        self.send (('initResponse', ir))
        # Only switch codecs after the response went out in the old charset
        self.set_codec (charset_name, records_in_charsets)

    def sort (self, sreq):
        # Claims success without sorting anything
        sresp = SortResponse ()
        sresp.sortStatus = 0
        self.send (('sortResponse', sresp))

    def delete (self, dreq):
        # Claims success without deleting anything
        dresp = DeleteResultSetResponse ()
        dresp.deleteOperationStatus = 0
        self.send (('deleteResultSetResponse', dresp))

    def esrequest (self, esreq):
        # Extended services are not implemented: always report failure
        print "ES", esreq
        esresp = ExtendedServicesResponse ()
        esresp.operationStatus = ExtendedServicesResponse['operationStatus'].get_num_from_name ('failure')
        self.send (('extendedServicesResponse', esresp))

    # PDU-type -> unbound handler method, used by run()
    fn_dict = {'searchRequest': search,
               'presentRequest': present,
               'initRequest' : init,
               'close' : close,
               'sortRequest' : sort,
               'deleteResultSetRequest' : delete,
               'extendedServicesRequest': esrequest}
def run_server (test = 0):
    """Accept connections forever, serving each with a fresh Server instance."""
    listen = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    listen.setsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen.bind (('', DEFAULT_PORT))
    listen.listen (1)
    while 1:
        (sock,addr) = listen.accept ()
        try:
            serv = Server (sock)
            serv.test = test
            serv.run ()
        except:
            # Keep serving other clients after any per-connection failure,
            # but let Ctrl-C terminate the whole server.
            (typ, val, tb) = sys.exc_info ()
            if typ == exceptions.KeyboardInterrupt:
                print "kbd interrupt, leaving"
                raise
            print "error %s %s from %s" % (typ, val, addr)
            traceback.print_exc(40)
        sock.close ()
def extract_apt (rpnQuery):
    """Unwrap an RPNQuery down to its AttributesPlusTerm operand."""
    struct = rpnQuery.rpn
    # Only the simple single-clause shape ('op', ('attrTerm', apt)) is legal
    assert struct[0] == 'op'
    operand = struct[1]
    assert operand[0] == 'attrTerm'
    return operand[1]
class Client (Conn):
test = 0
def __init__ (self, addr, port = DEFAULT_PORT, optionslist = None,
charset = None, lang = None, user = None, password = None,
preferredMessageSize = 0x100000, group = None,
maximumRecordSize = 0x100000, implementationId = "",
implementationName = "", implementationVersion = "",
ConnectionError = ConnectionError,
ProtocolError = ProtocolError,
UnexpectedCloseError = UnexpectedCloseError):
Conn.__init__ (self, ConnectionError = ConnectionError,
ProtocolError = ProtocolError,
UnexpectedCloseError = UnexpectedCloseError)
try:
self.sock.connect ((addr, port))
except socket.error, val:
self.sock = None
raise self.ConnectionError ('socket', str(val))
try_v3 = Z3950_VERS == 3
if (charset and not isinstance(charset, list)):
charset = [charset]
if (lang and not isinstance(lang, list)):
charset = [lang]
negotiate_charset = charset or lang
if (user or password or group):
authentication = (user, password, group)
else:
authentication = None
InitReq = make_initreq (optionslist, authentication = authentication,
v3 = try_v3,
preferredMessageSize = preferredMessageSize,
maximumRecordSize = maximumRecordSize,
implementationId = implementationId,
implementationName = implementationName,
implementationVersion = implementationVersion,
negotiate_charset = negotiate_charset)
if negotiate_charset:
# languages = ['eng', 'fre', 'enm']
# Thanne longen folk to looken in catalogues
# and clerkes for to seken straunge bookes ...
cnr = CharsetNegotReq (charset, lang, random.choice((0,1,None)))
if trace_charset:
print cnr
set_charset_negot (InitReq, cnr.pack_proposal (), try_v3)
if trace_init:
print "Initialize request", InitReq
self.initresp = self.transact (
('initRequest', InitReq), 'initResponse')
if trace_init:
print "Initialize Response", self.initresp
self.v3_flag = self.initresp.protocolVersion ['version_3']
val = get_charset_negot (self.initresp)
if val <> None:
csr = CharsetNegotResp ()
csr.unpack_negot_resp (val)
if trace_charset:
print "Got csr", str (csr)
self.set_codec (csr.charset, csr.records_in_charsets)
self.search_results = {}
self.max_to_request = 20
self.default_recordSyntax = Z3950_RECSYN_USMARC_ov
def get_option (self, option_name):
return self.initresp.options[option_name]
def transact (self, to_send, expected):
b = self.encode_ctx.encode (APDU, to_send)
if print_hex:
print map (hex, b)
if self.test:
print "Internal Testing"
# a reminder not to leave this switched on by accident
self.decode_ctx.feed (b)
decoded = self.read_PDU ()
print "to_send", to_send, "decoded", decoded
assert (to_send == decoded)
if self.sock == None:
raise self.ConnectionError ('disconnected')
try:
self.sock.send (b)
except socket.error, val:
self.sock = None
raise self.ConnectionError('socket', str(val))
if expected == None:
return
pdu = self.read_PDU ()
(arm, val) = pdu
if self.test:
print "Internal Testing 2"
b = self.encode_ctx.encode (APDU, (arm, val))
self.decode_ctx.feed (b)
redecoded = self.read_PDU ()
if redecoded <> (arm, val):
print "Redecoded", redecoded
print "old", (arm, val)
assert (redecoded == (arm, val))
if arm == expected: # may be 'close'
return val
elif arm == 'close':
raise self.UnexpectedCloseError (
"Server closed connection reason %d diag info %s" % \
(getattr (val, 'closeReason', -1),
getattr (val, 'diagnosticInformation', 'None given')))
else:
raise self.ProtocolError (
"Unexpected response from server %s %s " % (expected,
repr ((arm, val))))
def set_dbnames (self, dbnames):
self.dbnames = dbnames
def search_2 (self, query, rsn = default_resultSetName, **kw):
# We used to check self.initresp.options['search'], but
# support for search is required by the standard, and
# www.cnshb.ru:210 doesn't set the search bit if you negotiate
# v2, but supports search anyway
sreq = make_sreq (query, self.dbnames, rsn, **kw)
recv = self.transact (('searchRequest', sreq), 'searchResponse')
self.search_results [rsn] = recv
return recv
def search (self, query, rsn = default_resultSetName, **kw):
# for backwards compat
recv = self.search_2 (('type_1', query), rsn, **kw)
return recv.searchStatus and (recv.resultCount > 0)
# If searchStatus is failure, check result-set-status -
# -subset - partial, valid results available
# -interim - partial, not necessarily valid
# -none - no result set
# If searchStatus is success, check present-status:
# - success - OK
# - partial-1 - not all, access control
# - partial-2 - not all, won't fit in msg size (but we currently don't ask for
# any records in search, shouldn't happen)
# - partial-3 - not all, resource control (origin)
# - partial-4 - not all, resource control (target)
# - failure - no records, nonsurrogate diagnostic.
def get_count (self, rsn = default_resultSetName):
return self.search_results[rsn].resultCount
def delete (self, rsn):
if not self.initresp.options['delSet']:
return None
delreq = DeleteResultSetRequest ()
delreq.deleteFunction = 0 # list
delreq.resultSetList = [rsn]
return self.transact (('deleteResultSetRequest', delreq),
'deleteResultSetResponse')
def present (self, rsn= default_resultSetName, start = None,
count = None, recsyn = None, esn = None):
# don't check for support in init resp: see search for reasoning
# XXX Azaroth 2004-01-08. This does work when rs is result of sort.
try:
sresp = self.search_results [rsn]
if start == None:
start = sresp.nextResultSetPosition
if count == None:
count = sresp.resultCount
if self.max_to_request > 0:
count = min (self.max_to_request, count)
except:
pass
if recsyn == None:
recsyn = self.default_recordSyntax
preq = PresentRequest ()
preq.resultSetId = rsn
preq.resultSetStartPoint = start
preq.numberOfRecordsRequested = count
preq.preferredRecordSyntax = recsyn
if esn <> None:
preq.recordComposition = ('simple', esn)
return self.transact (('presentRequest', preq), 'presentResponse')
def scan (self, query, **kw):
sreq = ScanRequest ()
sreq.databaseNames = self.dbnames
assert (query[0] == 'type_1' or query [0] == 'type_101')
sreq.attributeSet = query[1].attributeSet
sreq.termListAndStartPoint = extract_apt (query[1])
sreq.numberOfTermsRequested = 20 # default
for (key, val) in kw.items ():
setattr (sreq, key, val)
return self.transact (('scanRequest', sreq), 'scanResponse')
def close (self):
close = Close ()
close.closeReason = 0
close.diagnosticInformation = 'Normal close'
try:
rv = self.transact (('close', close), 'close')
except self.ConnectionError:
rv = None
if self.sock <> None:
self.sock.close ()
self.sock = None
return rv
def mk_compound_query ():
    """Build a fixed demo query: (use=4) '1066' AND (use=1) 'Sellar'."""
    def _clause (use_val, word):
        # One AttributesPlusTerm clause with a single numeric use attribute
        elem = AttributeElement (attributeType = 1,
                                 attributeValue = ('numeric', use_val))
        apt = AttributesPlusTerm ()
        apt.attributes = [elem]
        apt.term = ('general', word)
        return ('op', ('attrTerm', apt))
    conj = RpnRpnOp ()
    conj.rpn1 = _clause (4, '1066')
    conj.rpn2 = _clause (1, 'Sellar')
    conj.op = ('and', None)
    rpnq = RPNQuery (attributeSet = Z3950_ATTRS_BIB1_ov)
    rpnq.rpn = ('rpnRpnOp', conj)
    return rpnq
def mk_simple_query (title):
    """Build a single-clause BIB-1 RPN query searching attribute 1=1003
    (author/name) for *title*."""
    attr = AttributeElement (attributeType = 1,
                             attributeValue = ('numeric', 1003))
    clause = AttributesPlusTerm ()
    clause.attributes = [attr]
    # XXX or should be characterString, not general, but only when V3.
    clause.term = ('general', title)
    rpnq = RPNQuery (attributeSet = Z3950_ATTRS_BIB1_ov)
    rpnq.rpn = ('op', ('attrTerm', clause))
    return rpnq
# Default target key and a directory of known Z39.50 servers, keyed by a
# short mnemonic: each value is (hostname, port, database name).
def_host = 'LC'
host_dict = {'BIBSYS': ('z3950.bibsys.no', 2100, 'BIBSYS'),
             'YAZ': ('127.0.0.1', 9999, 'foo'),
             'LCTEST' : ('ilssun2.loc.gov', 7090, 'Voyager'),
             'LC' : ('z3950.loc.gov', 7090, 'Voyager'),
             'NLC' : ('amicus.nlc-bnc.ca', 210, 'NL'),
             'BNC' : ('amicus.nlc-bnc.ca', 210, 'NL'),
             # On parle franc,ais aussi.
             'LOCAL': ('127.0.0.1', 9999, 'Default'),
             'LOCAL2': ('127.0.0.1', 2101, 'foo'),
             'BL' :('blpcz.bl.uk', 21021, 'BLPC-ALL'),
             'BELLLABS' : ('z3950.bell-labs.com', 210, 'books'),
             'BIBHIT' : ('www.bibhit.dk', 210, 'Default'),
             'YALE': ('webpac.library.yale.edu', 210, 'YALEOPAC'),
             'OXFORD': ('library.ox.ac.uk', 210, 'ADVANCE'),
             'OVID': ('z3950.ovid.com', 2213, 'pmed'), # scan only
             'UC': ('ipac.lib.uchicago.edu', 210, 'uofc'),
             'KUB' : ('dbiref.kub.nl', 1800, 'jel'),
             'INDEXDATA' : ('muffin.indexdata.dk', 9004, 'thatt')}
# last two are Zthes servers.
# Command-line driver: -s run as server, -h <key> pick host from host_dict,
# -t test mode, -e <enc> output encoding, -c/-l comma-separated charset and
# language lists for negotiation.
if __name__ == '__main__':
    optlist, args = getopt.getopt (sys.argv[1:], 'e:sh:tc:l:')
    server = 0
    host = def_host
    test = 0
    charset_list = None
    lang_list = None
    for (opt, val) in optlist:
        if opt == '-s':
            server = 1
        elif opt == '-h':
            host = val
        elif opt == '-t':
            test = 1
        elif opt == '-e':
            out_encoding = val
        elif opt == '-c':
            charset_list = val.split (',')
        elif opt == '-l':
            lang_list = val.split (',')
    if server:
        # NOTE(review): execution falls through to the client code below;
        # presumably run_server never returns — confirm.
        run_server (test)
    host = host.upper ()
    # unknown keys fall back to the default host entry
    (name, port, dbname) = host_dict.get (host, host_dict[def_host])
    cli = Client (name, port, charset = charset_list,
                  lang = lang_list)
    cli.test = test
    cli.set_dbnames ([dbname])
    print "Starting search"
#    rpnq = mk_simple_query ('Perec, Georges')
#    rpnq = mk_simple_query ('Johnson, Kim')
    rpnq = mk_compound_query ()
    # piggybacking disabled (smallSetUpperBound=0, mediumSetPresentNumber=0),
    # so records are fetched with an explicit present below
    if cli.search (rpnq, smallSetUpperBound = 0, mediumSetPresentNumber = 0,
                   largeSetLowerBound = 1):
        disp_resp (cli.present (recsyn = Z3950_RECSYN_USMARC_ov))
    else:
        print "Not found"
    print "Deleting"
    cli.delete (default_resultSetName)
    cli.delete ('bogus')
    print "Closing"
    try:
        cli.close ()
    except ConnectionError:
        # looks like LC, at least, sends a FIN on receipt of Close PDU
        # guess we should check for gracefullness of close, and complain
        # if not.
        pass

File diff suppressed because it is too large Load Diff

View File

@ -1,340 +0,0 @@
#!/usr/bin/env python
import codecs
from PyZ3950.z3950_2001 import *
from PyZ3950.oids import *
# Associate each Z39.50 OID with the ASN.1 type used to decode its payload.
asn1.register_oid (Z3950_RECSYN_GRS1, GenericRecord)
asn1.register_oid (Z3950_RECSYN_SUTRS, asn1.GeneralString)
asn1.register_oid (Z3950_RECSYN_EXPLAIN, Explain_Record)
asn1.register_oid (Z3950_RECSYN_OPAC, OPACRecord)
asn1.register_oid (Z3950_ES_PERSISTRS, PersistentResultSet)
asn1.register_oid (Z3950_ES_PERSISTQRY, PersistentQuery)
asn1.register_oid (Z3950_ES_PERIODQRY, PeriodicQuerySchedule)
asn1.register_oid (Z3950_ES_ITEMORDER, ItemOrder)
asn1.register_oid (Z3950_ES_DBUPDATE, Update)
asn1.register_oid (Z3950_ES_DBUPDATE_REV_1, Update_updrev1)
asn1.register_oid (Z3950_ES_EXPORTSPEC, ExportSpecification)
asn1.register_oid (Z3950_ES_EXPORTINV, ExportInvocation)
asn1.register_oid (Z3950_USR_SEARCHRES1, SearchInfoReport)
asn1.register_oid (Z3950_USR_INFO1, OtherInformation)
asn1.register_oid (Z3950_NEG_CHARSET3, CharSetandLanguageNegotiation_3)
asn1.register_oid (Z3950_USR_PRIVATE_OCLC_INFO, OCLC_UserInformation)
# below here is subject to change without notice, as I try to
# figure out the appropriate balance between convenience and flexibility
trace_charset = 0  # set nonzero to print charset-negotiation debug output
impl_vers = "1.0 beta" # XXX
implementationId = 'PyZ39.50 - contact asl2@pobox.com' # haven't been assigned an official id, apply XXX
def make_attr(set=None, atype=None, val=None, valType=None):
    """Build an AttributeElement.

    set      -- optional attribute-set OID (parameter name kept for API
                compatibility even though it shadows the builtin)
    atype    -- numeric attribute type
    val      -- attribute value
    valType  -- 'numeric' or a complex-value tag; when None, ints are
                encoded as numeric and everything else as 'string'
    """
    ae = AttributeElement()
    if set is not None:
        ae.attributeSet = set
    ae.attributeType = atype
    if valType == 'numeric' or (valType is None and isinstance(val, int)):
        ae.attributeValue = ('numeric', val)
    else:
        # non-numeric values go through the 'complex' CHOICE arm
        cattr = AttributeElement['attributeValue']['complex']()
        if valType is None:
            valType = 'string'
        cattr.list = [(valType, val)]
        ae.attributeValue = ('complex', cattr)
    return ae
# This list is needed to support recordsInSelectedCharSets == 0 when
# character set negotiation is in effect. The reason we don't
# just iterate over Z3950_RECSYN is that many of those are carried
# in OCTET STRINGs, and thus immune to negotiation; but maybe we should
# anyway.
# Record-syntax OIDs whose payloads are subject to charset switching.
retrievalRecord_oids = [
    Z3950_RECSYN_EXPLAIN_ov,
    Z3950_RECSYN_SUTRS_ov,
    Z3950_RECSYN_OPAC_ov,
    Z3950_RECSYN_SUMMARY_ov,
    Z3950_RECSYN_GRS1_ov,
    Z3950_RECSYN_ES_ov,
    Z3950_RECSYN_FRAGMENT_ov,
    Z3950_RECSYN_SQL_ov]
def register_retrieval_record_oids (ctx, new_codec_name = 'ascii'):
    """Register, for every retrieval-record OID, a switcher that pushes
    *new_codec_name* as the GeneralString codec on *ctx*."""
    codec = codecs.lookup (new_codec_name)
    def activate_codec ():
        # push first so the previous codec can be restored later
        ctx.push_codec ()
        ctx.set_codec (asn1.GeneralString, codec)
    for record_oid in retrievalRecord_oids:
        ctx.register_charset_switcher (record_oid, activate_codec)
# Map ISO-10646 transfer-syntax OIDs to Python codec names.
iso_10646_oid_to_name = {
    UNICODE_PART1_XFERSYN_UCS2_ov : 'utf-16', # XXX ucs-2 should differ from utf-16, in that ucs-2 forbids any characters not in the BMP, whereas utf-16 is a 16-bit encoding which encodes those characters into multiple 16-bit units
#    UNICODE_PART1_XFERSYN_UCS4_ov : 'ucs-4', # XXX no python support for this encoding?
    UNICODE_PART1_XFERSYN_UTF16_ov : 'utf-16',
    UNICODE_PART1_XFERSYN_UTF8_ov : 'utf-8'
    }
def try_get_iso10646_oid (charset_name):
    """Reverse-map a codec name to an ISO-10646 OID; None if unknown.
    XXX note that we don't know which of {UCS2, UTF16} oids we'll
    get from this."""
    for oid, name in iso_10646_oid_to_name.items ():
        if name == charset_name:
            return oid
    return None
def asn_charset_to_name (charset_tup):
    """Decode a (type, value) charset CHOICE into a codec name, or None.

    Handles the 'iso10646' arm (via iso_10646_oid_to_name) and Index Data's
    private externally-specified charset-name convention.
    """
    if trace_charset:
        print "asn_charset_to_name", charset_tup
    charset_name = None
    (typ, charset) = charset_tup
    if typ == 'iso10646':
        charset_name = iso_10646_oid_to_name.get (charset.encodingLevel,
                                                  None)
    elif typ == 'private':
        (spectyp, val) = charset
        if spectyp == 'externallySpecified':
            # the codec name travels as the octet-aligned encoding of an
            # EXTERNAL tagged with Index Data's private OID
            oid = getattr (val, 'direct_reference', None)
            if oid == Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov:
                enctyp, encval = val.encoding
                if enctyp == 'octet-aligned':
                    charset_name = encval
    if trace_charset:
        print "returning charset", charset_name
    return charset_name
def charset_to_asn (charset_name):
    """Encode a codec name as a charset CHOICE.

    Returns ('iso10646', Iso10646_3) when the name maps to an ISO-10646
    OID, otherwise Index Data's private externally-specified form.
    """
    oid = try_get_iso10646_oid (charset_name)
    if oid is not None:
        iso10646 = Iso10646_3 ()
        iso10646.encodingLevel = oid
        return ('iso10646', iso10646)
    else:
        ext = asn1.EXTERNAL ()
        ext.direct_reference = Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov
        ext.encoding = ('octet-aligned', charset_name)
        return ('private', ('externallySpecified', ext))
class CharsetNegotReq:
def __init__ (self, charset_list = None, lang_list = None,
records_in_charsets = None):
"""charset_list is a list of character set names, either ISO10646
(UTF-8 or UTF-16), or private. We support Index Data's semantics
for private character sets (see
http://www.indexdata.dk/pipermail/yazlist/2003-March/000504.html), so
you can pass any character set name for which Python has a codec installed
(but please don't use rot13 in production). Note that there should be
at most one of each of (ISO10646, private). (No, I don't know why, but
it says so in the ASN.1 definition comments.)
lang_list is a list of language codes, as defined in ANSI Z39.53-1994
(see, e.g., http://xml.coverpages.org/nisoLang3-1994.html).
records_in_charsets governs whether charset negotiation applies to
records, as well.)
Any of these parameters can be None, since the corresponding
elements in the ASN.1 are OPTIONAL.
"""
self.charset_list = charset_list
self.lang_list = lang_list
self.records_in_charsets = records_in_charsets
def __str__ (self):
return "Charset negot request %s %s %s" % (
str (self.charset_list), str (self.lang_list),
str (self.records_in_charsets))
def pack_proposal (self):
origin_prop = OriginProposal_3 ()
if self.charset_list <> None:
proposedCharSets = []
for charset_name in self.charset_list:
proposedCharSets.append (charset_to_asn (charset_name))
origin_prop.proposedCharSets = proposedCharSets
if self.lang_list <> None:
origin_prop.proposedlanguages = self.lang_list
if self.records_in_charsets <> None:
origin_prop.recordsInSelectedCharSets = (
self.records_in_charsets)
return ('proposal', origin_prop)
def unpack_proposal (self, csn):
(tag, proposal) = csn
assert (tag == 'proposal')
pcs = getattr (proposal, 'proposedCharSets', None)
if pcs <> None:
if trace_charset:
print "pcs", pcs
self.charset_list = []
for charset in pcs:
charset_name = asn_charset_to_name (charset)
if charset_name <> None:
self.charset_list.append (charset_name)
lang = getattr (proposal, 'proposedlanguages', None)
if lang <> None:
self.lang_list = lang
self.records_in_charsets = getattr (proposal,
'recordsInSelectedCharSets', None)
class CharsetNegotResp:
    """Target-side character-set / language negotiation response.

    All three fields may be None, mirroring the OPTIONAL ASN.1 elements.
    """
    def __init__ (self, charset = None, lang = None,
                  records_in_charsets = None):
        self.charset = charset
        self.lang = lang
        self.records_in_charsets = records_in_charsets
    def __str__ (self):
        return "CharsetNegotResp: %s %s %s" % (
            str (self.charset), str (self.lang),
            str (self.records_in_charsets))
    def unpack_negot_resp (self, neg_resp):
        """Populate self from a decoded ('response', TargetResponse_3) CHOICE."""
        typ, val = neg_resp
        assert (typ == 'response')
        self.charset = None
        # replaced deprecated '<>' comparisons with 'is not None' throughout
        scs = getattr (val, 'selectedCharSets', None)
        if scs is not None:
            self.charset = asn_charset_to_name (scs)
        self.lang = getattr (val, 'selectedLanguage', None)
        self.records_in_charsets = getattr (
            val, 'recordsInSelectedCharSets', None)
    def pack_negot_resp (self):
        """Encode self as the ('response', TargetResponse_3) CHOICE."""
        resp = TargetResponse_3 ()
        if self.charset is not None:
            resp.selectedCharSets = charset_to_asn (self.charset)
        if self.lang is not None:
            resp.selectedLanguage = self.lang
        if self.records_in_charsets is not None:
            resp.recordsInSelectedCharSets = self.records_in_charsets
        return ('response', resp)
def get_charset_negot (init): # can be passed either InitializeRequest or InitializeResponse
    """Extract the raw charset-negotiation value from an Init PDU, or None.

    Looks in otherInfo (V3) or userInformationField (V2), scanning for an
    externallyDefinedInfo EXTERNAL tagged with the charset-3 OID.
    """
    if trace_charset:
        print init
    # negotiation must have been flagged in the options bitmap at all
    if not init.options ['negotiation']:
        return None
    otherInfo = []
    if hasattr (init, 'otherInfo'):
        otherInfo = init.otherInfo
    elif hasattr (init, 'userInformationField'):
        ui = init.userInformationField
        if ui.direct_reference == Z3950_USR_INFO1_ov:
            (enctype, otherInfo) = ui.encoding
    for oi in otherInfo:
        if trace_charset:
            print oi
        (typ, val) = oi.information
        if typ == 'externallyDefinedInfo':
            if val.direct_reference == Z3950_NEG_CHARSET3_ov:
                (typ, val) = val.encoding
                if typ == 'single-ASN1-type':
                    return val
    return None
def set_charset_negot (init, val, v3_flag):
    """Attach charset-negotiation value *val* to an Init PDU.

    For V3 the EXTERNAL rides in otherInfo; for V2 it is wrapped in a
    userInformationField tagged with the USR.1 OID.
    """
    # again, can be passed either InitializeRequest or Response
    negot = asn1.EXTERNAL ()
    negot.direct_reference = Z3950_NEG_CHARSET3_ov
    negot.encoding= ('single-ASN1-type', val)
    OtherInfoElt = OtherInformation[0]
    oi_elt = OtherInfoElt ()
    oi_elt.information = ('externallyDefinedInfo', negot)
    other_info = [oi_elt]
    if trace_charset:
        print v3_flag, oi_elt
    if v3_flag:
        init.otherInfo = other_info
    else:
        ui = asn1.EXTERNAL ()
        ui.direct_reference = Z3950_USR_INFO1_ov
        ui.encoding = ('single-ASN1-type', other_info) # XXX test this
        # see http://lcweb.loc.gov/z3950/agency/defns/user-1.html
        init.userInformationField = ui
# Default message size (64 KiB) used when the caller specifies none.
def_msg_size = 0x10000
# rethink optionslist.  Maybe we should just turn on all the
# bits the underlying code supports?  We do need to be able to
# turn off multiple result sets for testing (see tests/test2.py),
# but that doesn't have to be the default.
def make_initreq (optionslist = None, authentication = None, v3 = 0,
                  negotiate_charset = 0, preferredMessageSize = 0x100000,
                  maximumRecordSize = 0x100000, implementationId = "",
                  implementationName = "", implementationVersion = ""):
    """Build an InitializeRequest PDU.

    optionslist     -- extra option names to switch on (the basic service
                       options are always set)
    authentication  -- optional (userId, password[, groupId]) tuple packed
                       as idPass authentication; extra elements are
                       silently truncated
    v3              -- request protocol version 3
    """
    # see http://lcweb.loc.gov/z3950/agency/wisdom/unicode.html
    InitReq = InitializeRequest ()
    InitReq.protocolVersion = ProtocolVersion ()
    InitReq.protocolVersion ['version_1'] = 1
    InitReq.protocolVersion ['version_2'] = 1
    InitReq.protocolVersion ['version_3'] = v3
    InitReq.options = Options ()
    # replaced deprecated '<>' comparisons with 'is not None' throughout
    if optionslist is not None:
        for o in optionslist:
            InitReq.options[o] = 1
    InitReq.options ['search'] = 1
    InitReq.options ['present'] = 1
    InitReq.options ['delSet'] = 1
    InitReq.options ['scan'] = 1
    InitReq.options ['sort'] = 1
    InitReq.options ['extendedServices'] = 1
    InitReq.options ['dedup'] = 1
    InitReq.options ['negotiation'] = negotiate_charset # XXX can negotiate other stuff, too
    # Preferred and Exceptional msg sizes are pretty arbitrary --
    # we dynamically allocate no matter what
    InitReq.preferredMessageSize = preferredMessageSize
    InitReq.exceptionalRecordSize = maximumRecordSize
    if (implementationId):
        InitReq.implementationId = implementationId
    else:
        # NOTE(review): 'impl_id' is not defined in the visible part of this
        # module (the module constant is named 'implementationId', which is
        # shadowed by the parameter here) — confirm impl_id is defined
        # elsewhere, otherwise this default branch raises NameError.
        InitReq.implementationId = impl_id
    if (implementationName):
        InitReq.implementationName = implementationName
    else:
        InitReq.implementationName = 'PyZ3950'
    if (implementationVersion):
        InitReq.implementationVersion = implementationVersion
    else:
        InitReq.implementationVersion = impl_vers
    if authentication is not None:
        class UP: pass
        up = UP ()
        upAttrList = ['userId', 'password', 'groupId']
        for val, attr in zip (authentication, upAttrList): # silently truncate
            if val is not None:
                setattr (up, attr, val)
        InitReq.idAuthentication = ('idPass', up)
    return InitReq
def make_sreq (query, dbnames, rsn, **kw):
    """Build a SearchRequest for *query* over *dbnames*, storing results
    under result-set name *rsn*.  Extra keyword args are copied onto the
    request verbatim."""
    sreq = SearchRequest ()
    sreq.query = query
    sreq.databaseNames = dbnames
    sreq.resultSetName = rsn
    # piggybacking disabled by default
    sreq.smallSetUpperBound = 0
    sreq.largeSetLowerBound = 1
    sreq.mediumSetPresentNumber = 0
    # as per http://lcweb.loc.gov/z3950/lcserver.html, Jun 07 2001,
    # to work around Endeavor bugs in 1.13
    sreq.replaceIndicator = 1
    for attr, value in kw.items ():
        setattr (sreq, attr, value)
    return sreq

File diff suppressed because it is too large Load Diff

View File

@ -1,965 +0,0 @@
#!/usr/bin/env python
"""Implements the ZOOM 1.4 API (http://zoom.z3950.org/api)
for Z39.50.
Some global notes on the binding (these will only make sense when read
after the API document):
Get/Set Option is implemented as member attribute access or
assignment. Implementations are encouraged to throw an AttributeError
for unsupported (or, possibly, mistyped) attributes. (Production
applications are encouraged to catch such errors.)
All errors are reported as exceptions deriving from ZoomError (or, at
least, it's a bug if they aren't). Bib1Err is defined as part of the
binding; all the rest are specific to this implementation.
ResultSet provides a sequence interface, with standard Python
iteration, indexing, and slicing. So if rs is a ResultSet, use len
(rs) for Get_Size and rs[i] for Get_Record, or iterate with for r in
rs: foo(r). Any attempt to access a record for which the server
returned a surrogate diagnostic will raise the appropriate Bib1Err
exception.
For Record, Render_Record is implemented as Python __str__. The
'syntax' member contains the string-format record syntax, and the
'data' member contains the raw data.
The following query types are supported:
- "CCL", ISO 8777, (http://www.indexdata.dk/yaz/doc/tools.tkl#CCL)
- "S-CCL", the same, but interpreted on the server side
- "CQL", the Common Query Language, (http://www.loc.gov/z3950/agency/zing/cql/)
- "S-CQL", the same, but interpreted on the server side
- "PQF", Index Data's Prefix Query Format, (http://www.indexdata.dk/yaz/doc/tools.tkl#PQF)
- "C2", Cheshire II query syntax, (http://cheshire.berkeley.edu/cheshire2.html#zfind)
- "ZSQL", Z-SQL, see (http://archive.dstc.edu.au/DDU/projects/Z3950/Z+SQL/)
- "CQL-TREE", a general-purpose escape allowing any object with a toRPN method to be used, e.g. the CQL tree objects
ScanSet, like ResultSet, has a sequence interface. The i-th element
is a dictionary. See the ScanSet documentation for supported keys.
Sample usage:
from PyZ3950 import zoom
conn = zoom.Connection ('z3950.loc.gov', 7090)
conn.databaseName = 'VOYAGER'
conn.preferredRecordSyntax = 'USMARC'
query = zoom.Query ('CCL', 'ti="1066 and all that"')
res = conn.search (query)
for r in res:
print str(r)
conn.close ()
I hope everything else is clear from the docstrings and the abstract
API: let me know if that's wrong, and I'll try to do better.
For some purposes (I think the only one is writing Z39.50 servers),
you may want to use the functions in the z3950 module instead. """
from __future__ import nested_scopes
__author__ = 'Aaron Lav (asl2@pobox.com)'
__version__ = '1.0' # XXX
import getopt
import sys
# TODO:
# finish lang/charset (requires charset normalization, confer w/ Adam)
# implement piggyback
# implement schema (Non useful)
# implement setname (Impossible?)
from PyZ3950 import z3950
from PyZ3950 import ccl
from PyZ3950 import asn1
from PyZ3950 import zmarc
from PyZ3950 import bib1msg
from PyZ3950 import grs1
from PyZ3950 import oids
# Azaroth 2003-12-04:
from PyZ3950 import CQLParser, SRWDiagnostics, pqf
from PyZ3950 import c2query as c2
asn1.register_oid (oids.Z3950_QUERY_SQL, z3950.SQLQuery)
def my_enumerate (l): # replace w/ enumerate when we go to Python 2.3
    """Return [(0, l[0]), (1, l[1]), ...] — pre-2.3 stand-in for enumerate."""
    return [(i, l[i]) for i in range (len (l))]
# Debug flag: nonzero prints a line per record-extraction batch.
trace_extract = 0
"""trace extracting records from search/present reqs"""
class ZoomError (Exception):
    """Root of the error hierarchy raised by this module."""


class ConnectionError(ZoomError):
    """Raised on TCP-level failures."""


class ClientNotImplError (ZoomError):
    """Raised for ZOOM client-side functionality not implemented
    (bug author)."""


class ServerNotImplError (ZoomError):
    """Raised when the server does not implement a requested function."""


class QuerySyntaxError (ZoomError):
    """Raised when the client cannot parse a query."""


class ProtocolError (ZoomError):
    """Raised on a malformatted server response."""


class UnexpectedCloseError (ProtocolError):
    """Raised on an unexpected (z3950-level, not tcp) close from the server."""


class UnknownRecSyn (ZoomError):
    """Raised when the server returns an unknown record syntax."""


class Bib1Err (ZoomError):
    """Carries a BIB-1 diagnostic (condition code, message, extra info)."""
    def __init__ (self, condition, message, addtlInfo):
        self.condition = condition
        self.message = message
        self.addtlInfo = addtlInfo
        ZoomError.__init__ (self)
    def __str__ (self):
        return "Bib1Err: %d %s %s" % (self.condition, self.message, self.addtlInfo)
class _ErrHdlr:
"""Error-handling services"""
err_attrslist = ['errCode','errMsg', 'addtlInfo']
def err (self, condition, addtlInfo, oid):
"""Translate condition + oid to message, save, and raise exception"""
self.errCode = condition
self.errMsg = bib1msg.lookup_errmsg (condition, oid)
self.addtlInfo = addtlInfo
raise Bib1Err (self.errCode, self.errMsg, self.addtlInfo)
def err_diagrec (self, diagrec):
(typ, data) = diagrec
if typ == 'externallyDefined':
raise ClientNotImplErr ("Unknown external diagnostic" + str (data))
addinfo = data.addinfo [1] # don't care about v2 vs v3
self.err (data.condition, addinfo, data.diagnosticSetId)
_record_type_dict = {}
"""Map oid to renderer, field-counter, and field-getter functions"""
def _oid_to_key (oid):
for (k,v) in _record_type_dict.items ():
if v.oid == oid:
return k
raise UnknownRecSyn (oid)
def _extract_attrs (obj, attrlist):
kw = {}
for key in attrlist:
if hasattr (obj, key):
kw[key] = getattr (obj, key)
return kw
class _AttrCheck:
"""Prevent typos"""
attrlist = []
not_implement_attrs = []
def __setattr__ (self, attr, val):
"""Ensure attr is in attrlist (list of allowed attributes), or
private (begins w/ '_'), or begins with 'X-' (reserved for users)"""
if attr[0] == '_' or attr in self.attrlist or attr[0:2] == 'X-':
self.__dict__[attr] = val
elif (attr in self.not_implement_attrs):
raise ClientNotImplError(attr)
else:
raise AttributeError (attr, val)
class Connection(_AttrCheck, _ErrHdlr):
    """Connection object: ZOOM's handle on one Z39.50 target.

    Lazily (re)connects; tracks a result-set counter so stale ResultSets
    created before a reconnect can be detected.
    """
    # options we deliberately reject (per ZOOM spec but unimplemented here)
    not_implement_attrs = ['piggyback',
                           'schema',
                           'proxy',
                           'async']
    # options forwarded verbatim into SearchRequest
    search_attrs = ['smallSetUpperBound',
                    'largeSetLowerBound',
                    'mediumSetPresentNumber',
                    'smallSetElementSetNames',
                    'mediumSetElementSetNames']
    # options forwarded into the InitializeRequest at connect time
    init_attrs = ['user',
                  'password',
                  'group',
                  'maximumRecordSize',
                  'preferredMessageSize',
                  'lang',
                  'charset',
                  'implementationId',
                  'implementationName',
                  'implementationVersion'
                  ]
    scan_zoom_to_z3950 = {
        # translate names from ZOOM spec to Z39.50 spec names
        'stepSize' : 'stepSize',
        'numberOfEntries' : 'numberOfTermsRequested',
        'responsePosition' : 'preferredPositionInResponse'
        }
    attrlist = search_attrs + init_attrs + scan_zoom_to_z3950.keys () + [
        'databaseName',
        'namedResultSets',
        'preferredRecordSyntax', # these three inheritable by RecordSet
        'elementSetName',
        'presentChunk',
        'targetImplementationId',
        'targetImplementationName',
        'targetImplementationVersion',
        'host',
        'port',
        ] + _ErrHdlr.err_attrslist
    _queryTypes = ['S-CQL', 'S-CCL', 'RPN', 'ZSQL']
    _cli = None
    host = ""
    port = 0
    # and now, some defaults
    namedResultSets = 1
    elementSetName = 'F'
    preferredRecordSyntax = 'USMARC'
    preferredMessageSize = 0x100000
    maximumRecordSize = 0x100000
    stepSize = 0
    numberOfEntries = 20 # for SCAN
    responsePosition = 1
    databaseName = 'Default'
    implementationId = 'PyZ3950'
    implementationName = 'PyZ3950 1.0/ZOOM v1.4'
    implementationVersion = '1.0'
    lang = None
    charset = None
    user = None
    password = None
    group = None
    presentChunk = 20 # for result sets
    def __init__(self, host, port, connect=1, **kw):
        """Establish connection to hostname:port.  kw contains initial
        values for options, and is useful for options which affect
        the InitializeRequest.  Currently supported values:
        user                   Username for authentication
        password               Password for authentication
        group                  Group for authentication
        maximumRecordSize      Maximum size in bytes of one record
        preferredMessageSize   Maximum size in bytes for response
        lang                   3 letter language code
        charset                Character set
        implementationId       Id for client implementation
        implementationName     Name for client implementation
        implementationVersion  Version of client implementation
        """
        self.host = host
        self.port = port
        self._resultSetCtr = 0
        for (k,v) in kw.items ():
            setattr (self, k, v)
        if (connect):
            self.connect()
    def connect(self):
        """Open (or re-open) the Z39.50 session; no-op if already live."""
        self._resultSetCtr += 1
        self._lastConnectCtr = self._resultSetCtr
        # Bump counters first, since even if we didn't reconnect
        # this time, we could have, and so any use of old connections
        # is an error.  (Old cached-and-accessed data is OK to use:
        # cached but not-yet-accessed data is probably an error, but
        # a not-yet-caught error.)
        if self._cli <> None and self._cli.sock <> None:
            return
        initkw = {}
        for attr in self.init_attrs:
            initkw[attr] = getattr(self, attr)
        if (self.namedResultSets):
            options = ['namedResultSets']
        else:
            options = []
        # hand our exception classes down so z3950.Client raises them
        initkw ['ConnectionError'] = ConnectionError
        initkw ['ProtocolError'] = ProtocolError
        initkw ['UnexpectedCloseError'] = UnexpectedCloseError
        self._cli = z3950.Client (self.host, self.port,
                                  optionslist = options, **initkw)
        self.namedResultSets = self._cli.get_option ('namedResultSets')
        self.targetImplementationId = getattr (self._cli.initresp, 'implementationId', None)
        self.targetImplementationName = getattr (self._cli.initresp, 'implementationName', None)
        self.targetImplementationVersion = getattr (self._cli.initresp, 'implementationVersion', None)
        if (hasattr (self._cli.initresp, 'userInformationField')):
            # weird.  U of Chicago returns an EXTERNAL with nothing
            # but 'encoding', ('octet-aligned', '2545') filled in.
            if (hasattr (self._cli.initresp.userInformationField,
                         'direct_reference') and
                self._cli.initresp.userInformationField.direct_reference ==
                oids.Z3950_USR_PRIVATE_OCLC_INFO_ov):
                # see http://www.oclc.org/support/documentation/firstsearch/z3950/fs_z39_config_guide/ for docs
                oclc_info = self._cli.initresp.userInformationField.encoding [1]
                # the docs are a little unclear, but I presume we're
                # supposed to report failure whenever a failReason is given.
                if hasattr (oclc_info, 'failReason'):
                    raise UnexpectedCloseError ('OCLC_Info ',
                                                oclc_info.failReason,
                                                getattr (oclc_info, 'text',
                                                         ' no text given '))
    def search (self, query):
        """Search, taking Query object, returning ResultSet"""
        if (not self._cli):
            self.connect()
        assert (query.typ in self._queryTypes)
        # '+'-separated databaseName searches multiple databases at once
        dbnames = self.databaseName.split ('+')
        self._cli.set_dbnames (dbnames)
        cur_rsn = self._make_rsn ()
        recv = self._cli.search_2 (query.query,
                                   rsn = cur_rsn,
                                   **_extract_attrs (self, self.search_attrs))
        self._resultSetCtr += 1
        rs = ResultSet (self, recv, cur_rsn, self._resultSetCtr)
        return rs
    # and 'Error Code', 'Error Message', and 'Addt'l Info' methods still
    # needed
    def scan (self, query):
        """Scan an index, returning a ScanSet; ZOOM-named options are
        translated to their Z39.50 names via scan_zoom_to_z3950."""
        if (not self._cli):
            self.connect()
        self._cli.set_dbnames ([self.databaseName])
        kw = {}
        for k, xl in self.scan_zoom_to_z3950.items ():
            if hasattr (self, k):
                kw [xl] = getattr (self, k)
        return ScanSet (self._cli.scan (query.query, **kw))
    def _make_rsn (self):
        """Return result set name"""
        if self.namedResultSets:
            return "rs%d" % self._resultSetCtr
        else:
            return z3950.default_resultSetName
    def close (self):
        """Close connection"""
        self._cli.close ()
    def sort (self, sets, keys):
        """ Sort sets by keys, return resultset interface """
        if (not self._cli):
            self.connect()
        # XXX This should probably be shuffled down into z3950.py
        sortrelations = ['ascending', 'descending', 'ascendingByFrequency', 'descendingByFrequency']
        req = z3950.SortRequest()
        req.inputResultSetNames = []
        for s in sets:
            s._check_stale ()
            req.inputResultSetNames.append(s._resultSetName)
        cur_rsn = self._make_rsn()
        req.sortedResultSetName = cur_rsn
        zkeys = []
        for k in keys:
            zk = z3950.SortKeySpec()
            zk.sortRelation = sortrelations.index(k.relation)
            zk.caseSensitivity = k.caseInsensitive
            # missingValueData, when present, overrides missingValueAction
            if (k.missingValueAction):
                zk.missingValueAction = (k.missingValueAction, None)
            if (k.missingValueData):
                zk.missingValueAction = ('missingValueData', k.missingValueData)
            value = k.sequence
            if (k.type == 'accessPoint'):
                if (value.typ <> 'RPN'):
                    raise ValueError # XXX
                l = z3950.SortKey['sortAttributes']()
                l.id = value.query[1].attributeSet
                l.list = value.query[1].rpn[1][1].attributes
                seq = ('sortAttributes', l)
            elif (k.type == 'private'):
                seq = ('privateSortKey', value)
            elif (k.type == 'elementSetName'):
                spec = z3950.Specification()
                spec.elementSpec = ('elementSetName', value)
                seq = ('elementSpec', spec)
            else:
                raise ValueError # XXX
            spec = ('generic', seq)
            zk.sortElement = spec
            zkeys.append(zk)
        req.sortSequence = zkeys
        recv = self._cli.transact(('sortRequest', req), 'sortResponse')
        self._resultSetCtr += 1
        if (hasattr(recv, 'diagnostics')):
            diag = recv.diagnostics[0][1]
            self.err(diag.condition, diag.addinfo, diag.diagnosticSetId)
        if (not hasattr(recv, 'resultCount')):
            # First guess: sum of all input sets
            recv.resultCount = 0
            for set in sets:
                recv.resultCount += len(set)
            # Check for addInfo to override
            try:
                val = recv.otherInfo[0].information[1]
                if (val[:14] == 'Result-count: '):
                    recv.resultCount = int(val[14:])
            except:
                pass
        rs = ResultSet (self, recv, cur_rsn, self._resultSetCtr)
        return rs
class SortKey(_AttrCheck):
    """One sort criterion for Connection.sort: a relation, a key type, and
    the key data (see ZOOM spec).  Attribute assignment is typo-checked by
    _AttrCheck via attrlist."""
    attrlist = ['relation', 'caseInsensitive', 'missingValueAction', 'missingValueData', 'type', 'sequence']
    relation = "ascending"
    caseInsensitive = 1
    missingValueAction = ""
    missingValueData = ""
    type = "accessPoint"
    sequence = ""
    def __init__ (self, **kw):
        for name, value in kw.items():
            setattr(self, name, value)
class Query:
    """Wraps a query string, translating it at construction time into the
    (typ, encoded-query) pair that z3950.Client expects."""
    def __init__ (self, typ, query):
        """Creates Query object.
        Supported query types:  CCL, S-CCL, CQL, S-CQL, PQF, C2, ZSQL, CQL-TREE
        """
        typ = typ.upper()
        # XXX maybe replace if ... elif ... with dict mapping querytype to func
        if typ == 'CCL':
            # client-side CCL -> RPN translation
            self.typ = 'RPN'
            try:
                self.query = ccl.mk_rpn_query (query)
            except ccl.QuerySyntaxError, err:
                print "zoom raising", str (err), " for", query
                raise QuerySyntaxError (str(err))
        elif typ == 'S-CCL': # server-side ccl
            self.typ = typ
            self.query = ('type-2', query)
        elif typ == 'S-CQL': # server-side cql
            # CQL string carried opaquely in an EXTERNAL
            self.typ = typ
            xq = asn1.EXTERNAL()
            xq.direct_reference = oids.Z3950_QUERY_CQL_ov
            xq.encoding = ('single-ASN1-type', query)
            self.query = ('type_104', xq)
        elif typ == 'CQL': # CQL to RPN transformation
            self.typ = 'RPN'
            try:
                q = CQLParser.parse(query)
                rpnq = z3950.RPNQuery()
                # XXX Allow Attribute Architecture somehow?
                rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
                rpnq.rpn = q.toRPN()
                self.query = ('type_1', rpnq)
            except SRWDiagnostics.SRWDiagnostic, err:
                # SRW diagnostics carry their own meaning; re-raise as-is
                raise err
            except:
                raise QuerySyntaxError
        elif typ == 'PQF':  # PQF to RPN transformation
            self.typ = 'RPN'
            try:
                self.query = pqf.parse(query)
            except:
                raise QuerySyntaxError
        elif typ == 'C2': # Cheshire2 Syntax
            self.typ = 'RPN'
            try:
                q = c2.parse(query)
                self.query = q[0]
            except:
                raise QuerySyntaxError
        elif typ == 'ZSQL': # External SQL
            self.typ = typ
            xq = asn1.EXTERNAL()
            xq.direct_reference = oids.Z3950_QUERY_SQL_ov
            q = z3950.SQLQuery()
            q.queryExpression = query
            xq.encoding = ('single-ASN1-type', q)
            self.query = ('type_104', xq)
        elif typ == 'CQL-TREE': # Tree to RPN
            # *query* here is an object exposing toRPN(), not a string
            self.typ = 'RPN'
            try:
                rpnq = z3950.RPNQuery()
                # XXX Allow Attribute Architecture
                rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
                rpnq.rpn = query.toRPN()
                self.query = ('type_1', rpnq)
            except SRWDiagnostics.SRWDiagnostic, err:
                raise err
            except:
                raise QuerySyntaxError
        else:
            raise ClientNotImplError ('%s queries not supported' % typ)
class ResultSet(_AttrCheck, _ErrHdlr):
"""Cache results, presenting read-only sequence interface. If
a surrogate diagnostic is returned for the i-th record, an
appropriate exception will be raised on access to the i-th
element (either access by itself or as part of a slice)."""
inherited_elts = ['elementSetName', 'preferredRecordSyntax',
'presentChunk']
attrlist = inherited_elts + _ErrHdlr.err_attrslist
not_implement_attrs = ['piggyback',
'schema']
def __init__ (self, conn, searchResult, resultSetName, ctr):
"""Only for creation by Connection object"""
self._conn = conn # needed for 'option inheritance', see ZOOM spec
self._searchResult = searchResult
self._resultSetName = resultSetName
self._records = {}
self._ctr = ctr
# _records is a dict indexed by preferredRecordSyntax of
# dicts indexed by elementSetName of lists of records
self._ensure_recs ()
# whether there are any records or not, there may be
# nonsurrogate diagnostics. _extract_recs will get them.
if hasattr (self._searchResult, 'records'):
self._extract_recs (self._searchResult.records, 0)
def __getattr__ (self, key):
"""Forward attribute access to Connection if appropriate"""
if self.__dict__.has_key (key):
return self.__dict__[key]
if key in self.inherited_elts:
return getattr (self._conn, key) # may raise AttributeError
raise AttributeError (key)
def _make_keywords (self):
"""Set up dict of parms for present request"""
kw = {}
# need for translation here from preferredRecordSyntax to recsyn
# is kinda pointless
if hasattr (self, 'preferredRecordSyntax'):
try:
kw['recsyn'] = _record_type_dict [
self.preferredRecordSyntax].oid
except KeyError, err:
raise ClientNotImplError ('Unknown record syntax ' +
self.preferredRecordSyntax)
if hasattr (self, 'elementSetName'):
kw['esn'] = ('genericElementSetName', self.elementSetName)
return kw
def __len__ (self):
"""Get number of records"""
return self._searchResult.resultCount
def _pin (self, i):
"""Handle negative indices"""
if i < 0:
return i + len (self)
return i
def _ensure_recs (self):
if not self._records.has_key (self.preferredRecordSyntax):
self._records [self.preferredRecordSyntax] = {}
self._records [self.preferredRecordSyntax][
self.elementSetName] = [None] * len (self)
if not self._records[self.preferredRecordSyntax].has_key (
self.elementSetName):
self._records [self.preferredRecordSyntax][
self.elementSetName] = [None] * len (self)
def _get_rec (self, i):
return self._records [self.preferredRecordSyntax][
self.elementSetName][i]
def _check_stale (self):
if self._ctr < self._conn._lastConnectCtr:
raise ConnectionError ('Stale result set used')
# XXX is this right?
if (not self._conn.namedResultSets) and \
self._ctr <> self._conn._resultSetCtr:
raise ServerNotImplError ('Multiple Result Sets')
# XXX or this?
def _ensure_present (self, i):
self._ensure_recs ()
if self._get_rec (i) == None:
self._check_stale ()
maxreq = self.presentChunk
if maxreq == 0: # get everything at once
lbound = i
count = len (self) - lbound
else:
lbound = (i / maxreq) * maxreq
count = min (maxreq, len (self) - lbound)
kw = self._make_keywords ()
if self._get_rec (lbound) == None:
presentResp = self._conn._cli.present (
start = lbound + 1, # + 1 b/c 1-based
count = count,
rsn = self._resultSetName,
**kw)
if not hasattr (presentResp, 'records'):
raise ProtocolError (str (presentResp))
self._extract_recs (presentResp.records, lbound)
# Maybe there was too much data to fit into
# range (lbound, lbound + count). If so, try
# retrieving just one record. XXX could try
# retrieving more, up to next cache bdary.
if i <> lbound and self._get_rec (i) == None:
presentResp = self._conn._cli.present (
start = i + 1,
count = 1,
rsn = self._resultSetName,
**kw)
self._extract_recs (presentResp.records, i)
rec = self._records [self.preferredRecordSyntax][
self.elementSetName][i]
if rec <> None and rec.is_surrogate_diag ():
rec.raise_exn ()
def __getitem__ (self, i):
    """Ensure item is present, and return a Record"""
    idx = self._pin (i)
    if idx >= len (self):
        raise IndexError
    self._ensure_present (idx)
    esn_map = self._records [self.preferredRecordSyntax]
    return esn_map [self.elementSetName][idx]
def __getslice__(self, i, j):
    """Return the records in [i, j), fetching any that are missing."""
    start = self._pin (i)
    stop = min (self._pin (j), len (self))
    for idx in range (start, stop):
        self._ensure_present (idx)
    if not self._records: # XXX is this right?
        return []
    cache = self._records[self.preferredRecordSyntax][self.elementSetName]
    return cache [start:stop]
def _extract_recs (self, records, lbound):
    """Decode a present response's records element and cache the results
    starting at index lbound.  Nonsurrogate diagnostics raise immediately;
    each surrogate diagnostic becomes a SurrogateDiagnostic placeholder
    which raises lazily on access."""
    (typ, recs) = records
    if trace_extract:
        print "Extracting", len (recs), "starting at", lbound
    if typ == 'nonSurrogateDiagnostic':
        # Whole-present failure: raise from the single diagnostic.
        self.err (recs.condition, "", recs.diagnosticSetId)
    elif typ == 'multipleNonSurDiagnostics':
        # see Zoom mailing list discussion of 2002/7/24 to justify
        # ignoring all but first error.
        diagRec = recs [0]
        self.err_diagrec (diagRec)
    if (typ <> 'responseRecords'):
        raise ProtocolError ("Bad records typ " + str (typ) + str (recs))
    for i,r in my_enumerate (recs):
        r = recs [i]
        dbname = getattr (r, 'name', '')
        (typ, data) = r.record
        if (typ == 'surrogateDiagnostic'):
            # Per-record error: defer raising until the record is used.
            rec = SurrogateDiagnostic (data)
        elif typ == 'retrievalRecord':
            oid = data.direct_reference
            dat = data.encoding
            (typ, dat) = dat
            if (oid == oids.Z3950_RECSYN_USMARC_ov):
                # MARC data must arrive as an octet-aligned EXTERNAL.
                if typ <> 'octet-aligned':
                    raise ProtocolError (
                        "Weird record EXTERNAL MARC type: " + typ)
            rec = Record (oid, dat, dbname)
        else:
            raise ProtocolError ("Bad typ %s data %s" %
                                 (str (typ), str(data)))
        self._records[self.preferredRecordSyntax][
            self.elementSetName][lbound + i] = rec
def delete (self): # XXX or can I handle this w/ a __del__ method?
    """Delete result set"""
    res = self._conn._cli.delete (self._resultSetName)
    if res == None:
        return # server doesn't support Delete
    # XXX should I throw an exn for delete errors?  Probably.

# and 'Error Code', 'Error Message', and 'Addt'l Info' methods
def sort (self, keys):
    """Sort this result set on the server; delegates to Connection.sort."""
    return self._conn.sort ([self], keys)
class SurrogateDiagnostic(_ErrHdlr):
    """Represent surrogate diagnostic.  Raise appropriate exception
    on access to syntax or data, or when raise_exn method is called.
    Currently, RecordSet relies on the return from is_surrogate_diag (),
    and calls raise_exn based on that."""
    def __init__ (self, diagrec):
        # diagrec is the raw Z39.50 diagnostic record for this entry.
        self.diagrec = diagrec
    def is_surrogate_diag (self):
        """Always true: this object stands in for a failed record."""
        return 1
    def raise_exn (self):
        """Raise the ZOOM exception encoded in the diagnostic record."""
        self.err_diagrec (self.diagrec)
    def __getattr__ (self, attr):
        # Touching the record payload triggers the deferred diagnostic.
        if attr == 'data' or attr == 'syntax':
            self.raise_exn ()
        # Bug fix: the original wrote `_ErrHdlr.__getattr`, which Python's
        # private-name mangling rewrites (inside this class body) to
        # `_ErrHdlr._SurrogateDiagnostic__getattr` -- an attribute that can
        # never exist.  Delegate to the base class dunder explicitly.
        return _ErrHdlr.__getattr__ (self, attr)
class Record:
    """A retrieved database record.

    Attributes:
      syntax       -- record-syntax name (string), e.g. 'USMARC'
      data         -- payload, already run through the syntax preprocessor
      databaseName -- name of the database the record came from

    The data representation depends on the syntax:
    USMARC -- raw MARC data
    SUTRS -- a string (possibly in the future unicode)
    XML -- ditto
    GRS-1 -- a tree (see grs1.py for details)
    EXPLAIN -- a hard-to-describe format (contact me if you're actually \
using this)
    OPAC -- ditto

    Other representations are not yet defined."""
    def __init__ (self, oid, data, dbname):
        """Only for use by ResultSet"""
        self.syntax = _oid_to_key (oid)
        # Per-syntax helper bundle (renderer, preproc, field access).
        self._rt = _record_type_dict [self.syntax]
        self.data = self._rt.preproc (data)
        self.databaseName = dbname
    def is_surrogate_diag (self):
        """Real records are never surrogate diagnostics."""
        return 0
    def get_fieldcount (self):
        """Get number of fields"""
        return self._rt.fieldcount (self.data)
    def get_field (self, spec):
        """Get field"""
        return self._rt.field (self.data, spec)
    def __str__ (self):
        """Render printably"""
        rendered = self._rt.renderer (self.data)
        return 'Rec: ' + str (self.syntax) + " " + rendered
class _RecordType:
    """Map syntax string to OID and per-syntax utility functions"""
    def __init__ (self, name, oid, renderer = lambda v:v,
                  fieldcount = lambda v:1, field = None, preproc = lambda v:v):
        """Register syntax.

        Instantiating this class has the side effect of adding the new
        syntax to the module-level _record_type_dict under `name`."""
        self.oid = oid                # record-syntax OID
        self.preproc = preproc        # raw data -> stored representation
        self.renderer = renderer      # data -> printable string
        self.field = field            # (data, spec) -> field value
        self.fieldcount = fieldcount  # data -> number of fields
        _record_type_dict [name] = self
# XXX do I want an OPAC class?  Probably, and render_OPAC should be
# a member function.

def render_OPAC (opac_data):
    """Render an OPAC record (bibliographic MARC record plus holdings
    data) as a printable multi-line string."""
    s_list = []
    biblio_oid = opac_data.bibliographicRecord.direct_reference
    if (biblio_oid == z3950.Z3950_RECSYN_USMARC_ov):
        bib_marc = zmarc.MARC (opac_data.bibliographicRecord.encoding [1])
        s_list.append ("Bibliographic %s\n" % (str (bib_marc),) )
    else:
        s_list.append ("Unknown bibliographicRecord OID: " + str(biblio_oid))
    for i, hd in my_enumerate (opac_data.holdingsData):
        typ, data = hd
        s_list.append ('Holdings %d:' % (i,))
        if typ == 'holdingsAndCirc':
            # Structured holdings: walk the decoded ASN.1 tree recursively,
            # indenting one tab stop per nesting level.
            def render (item, level = 1):
                s_list = []
                if isinstance (item, asn1.StructBase):
                    for attr, val in item.__dict__.items ():
                        if attr [0] <> '_':  # skip internal ASN.1 attributes
                            s_list.append ("%s%s: %s" % (
                                "\t" * level, attr, "\n".join(render (val, level + 1))))
                elif (isinstance (item, type ([])) and len (item) > 0
                      and isinstance (item [0], asn1.StructBase)):
                    s_list.append ("") # generate newline
                    for i, v in my_enumerate (item):
                        s_list.append ("\t" * (level + 1) + str (i))
                        s_list += render (v, level + 1)
                else:
                    s_list.append (repr (item))
                return s_list
            s_list.append ("\n".join (render (data)))
        elif typ == 'marcHoldingsRecord':
            hold_oid = data.direct_reference
            if hold_oid == z3950.Z3950_RECSYN_USMARC_ov:
                holdings_marc = zmarc.MARC (data.encoding [1])
                s_list.append ("Holdings %s\n" % (str (holdings_marc),))
            else:
                s_list.append ("Unknown holdings OID: " + str (hold_oid))
        else:
            s_list.append ("Unknown holdings type: " + typ)
            # shouldn't happen unless z39.50 definition is extended
    return "\n".join (s_list)
# Register the built-in record syntaxes.  Each _RecordType call has the
# side effect of adding an entry to _record_type_dict keyed by name.
_RecordType ('USMARC', z3950.Z3950_RECSYN_USMARC_ov,
             renderer = lambda v: str(zmarc.MARC(v)))
_RecordType ('UKMARC', z3950.Z3950_RECSYN_UKMARC_ov,
             renderer = lambda v: str(zmarc.MARC(v)))
_RecordType ('SUTRS', z3950.Z3950_RECSYN_SUTRS_ov)
_RecordType ('XML', z3950.Z3950_RECSYN_MIME_XML_ov)
_RecordType ('SGML', z3950.Z3950_RECSYN_MIME_SGML_ov)
_RecordType ('GRS-1', z3950.Z3950_RECSYN_GRS1_ov,
             renderer = lambda v: str (v),
             preproc = grs1.preproc)
_RecordType ('OPAC', z3950.Z3950_RECSYN_OPAC_ov, renderer = render_OPAC)
_RecordType ('EXPLAIN', z3950.Z3950_RECSYN_EXPLAIN_ov,
             renderer = lambda v: str (v))
class ScanSet (_AttrCheck, _ErrHdlr):
    """Hold result of scan.
    """
    # Map ZOOM field names to the corresponding Z39.50 TermInfo attributes.
    zoom_to_z3950 = { # XXX need to provide more processing for attrs, alt
        'freq'   : 'globalOccurrences',
        'display': 'displayTerm',
        'attrs'  : 'suggestedAttributes',
        'alt'    : 'alternativeTerm',
        'other'  : 'otherTermInfo'}
    attrlist = _ErrHdlr.err_attrslist

    def __init__ (self, scanresp):
        """For internal use only!"""
        self._scanresp = scanresp
        # Note that specification says that both entries and
        # nonsurrogate diags can be present.  This code will always
        # raise the exn, and will need to be changed if both are needed.
        if hasattr (scanresp.entries, 'nonsurrogateDiagnostics'):
            self.err_diagrec (scanresp.entries.nonsurrogateDiagnostics[0])

    def __len__ (self):
        """Return number of entries"""
        return self._scanresp.numberOfEntriesReturned

    def _get_rec (self, i):
        """Return the i'th TermInfo, raising for surrogate diagnostics."""
        if not hasattr (self._scanresp.entries, 'entries'):
            raise IndexError
        kind, payload = self._scanresp.entries.entries[i]
        if kind == 'termInfo':
            return payload
        # Only way asserts can fail here is if someone changes
        # the Z39.50 ASN.1 definitions.
        assert (kind == 'surrogateDiagnostic')
        if payload [0] == 'externallyDefined':
            raise ClientNotImplError (
                'Scan unknown surrogate diagnostic type: ' +
                str (payload))
        assert (payload[0] == 'defaultFormat')
        defDiagFmt = payload [1]
        self.err (defDiagFmt.condition, defDiagFmt.addinfo,
                  defDiagFmt.diagnosticSetId)

    def get_term (self, i):
        """Return term.  Note that get_{term,field,fields} can throw an
        exception if the i'th term is a surrogate diagnostic."""
        return self._get_rec (i).term

    def get_field (self, field, i):
        """Returns value of field:
        term: term
        freq: integer
        display: string
        attrs: currently z3950 structure, should be string of attributes
        alt: currently z3950 structure, should be [string of attrs, term]
        other: currently z3950 structure, dunno what the best Python representation would be
        """
        z3950_name = self.zoom_to_z3950 [field]
        rec = self._get_rec (i)
        return rec.__dict__[z3950_name]

    def get_fields (self, i):
        """Return a dictionary mapping ZOOM's field names to values
        present in the response.  (Like get_field, but for all fields.)"""
        rec = self._get_rec (i)
        d = {}
        for zoom_name, z3950_name in self.zoom_to_z3950.items ():
            value = getattr (rec, z3950_name, None)
            if value != None:
                d[zoom_name] = value
        d["term"] = self.get_term (i)
        return d

    def _pin (self, i):
        """Map a possibly-negative index to its non-negative form."""
        return i + len (self) if i < 0 else i

    def __getitem__ (self, i):
        return self.get_fields (self._pin (i))

    def __getslice__ (self, i, j):
        lo = self._pin (i)
        hi = min (self._pin (j), len (self))
        return [self.get_fields (k) for k in range (lo, hi)]
if __name__ == '__main__':
    # Simple command-line driver: search a host and print every record in
    # each requested element-set / record-syntax combination.
    #   -h host  ('name:port:database', or an alias known to z3950.host_dict)
    #   -q query (interactive prompt if omitted)
    #   -t query type (default CCL)
    #   -f comma-separated record syntaxes   -e comma-separated element sets
    #   -v validation info (currently collected but unused below)
    optlist, args = getopt.getopt (sys.argv[1:], 'h:q:t:f:a:e:v:')
    host = 'LC'
    query = ''
    qtype = 'CCL'
    fmts = ['USMARC']
    esns = ['F']
    validation = None
    for (opt, val) in optlist:
        if opt == '-h':
            host = val
        elif opt == '-q':
            query = val
        elif opt == '-t':
            qtype = val
        elif opt == '-f':
            fmts = val.split (',')
        elif opt == '-e':
            esns = val.split (',')
        elif opt == '-v':
            validation = val.split (',')
    # Resolve a host alias; otherwise expect an explicit name:port:database.
    rv = z3950.host_dict.get (host)
    if rv == None:
        (name, port, dbname) = host.split (':')
        port = int (port)
    else:
        (name, port, dbname) = rv
    conn = Connection (name, port)
    conn.databaseName = dbname
    conn.preferredRecordSyntax = fmts [0]
    def run_one (q):
        # Run a single query, printing results for every requested
        # syntax / element-set combination; ZOOM errors are reported
        # but do not abort the loop over combinations.
        try:
            query = Query (qtype, q)
            res = conn.search (query)
            for esn in esns:
                for syn in fmts:
                    print "Syntax", syn, "Esn", esn
                    res.preferredRecordSyntax = syn
                    if esn <> 'NONE':
                        res.elementSetName = esn
                    try:
                        for r in res:
                            print str(r)
                    except ZoomError, err:
                        print "Zoom exception", err.__class__, err
#            res.delete ()
# Looks as if Oxford will close the connection if a delete is sent,
# despite claiming delete support (verified with yaz client, too).
        except ZoomError, err:
            print "Zoom exception", err.__class__, err
    if query == '':
        # No -q given: prompt interactively until a blank line is entered.
        while 1:
            q_str = raw_input ('CCL query: ')
            if q_str == '': break
            run_one (q_str)
    else:
        run_one (query)
    conn.close ()

1484
python/atom/__init__.py Normal file

File diff suppressed because it is too large Load Diff

43
python/atom/auth.py Normal file
View File

@ -0,0 +1,43 @@
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import base64
class BasicAuth(object):
  """Sets the Authorization header as defined in RFC1945.

  Attributes:
    basic_cookie: The base64-encoded 'user_id:password' string placed in
        the HTTP Basic Authorization header.
  """

  def __init__(self, user_id, password):
    credentials = '%s:%s' % (user_id, password)
    # Use base64.b64encode rather than the original base64.encodestring:
    # encodestring inserts a newline after every 76 characters of output
    # (and .strip() only removed the trailing one), which corrupts the
    # Authorization header for long user_id:password pairs.  b64encode
    # emits a single unbroken token, as RFC 1945 requires.
    self.basic_cookie = base64.b64encode(
        credentials.encode('utf-8')).decode('ascii')

  def modify_request(self, http_request):
    """Adds the HTTP Basic Authorization header to http_request."""
    http_request.headers['Authorization'] = 'Basic %s' % self.basic_cookie

  ModifyRequest = modify_request
class NoAuth(object):
  """Auth object that leaves the request completely untouched."""

  def modify_request(self, http_request):
    # Deliberately a no-op: used where no Authorization header is wanted.
    pass

182
python/atom/client.py Normal file
View File

@ -0,0 +1,182 @@
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomPubClient provides CRUD ops. in line with the Atom Publishing Protocol.
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.http_core
class Error(Exception):
  """Base class for exceptions raised by this module."""


class MissingHost(Error):
  """Raised when a request's URI names no host and no default is set."""
class AtomPubClient(object):
  """Performs AtomPub (HTTP CRUD) requests via a pluggable HTTP client."""
  # Default host used when a request URI does not name one.
  host = None
  # Default auth object applied when no per-request token is given.
  auth_token = None
  ssl = False  # Whether to force all requests over https

  def __init__(self, http_client=None, host=None,
               auth_token=None, source=None, **kwargs):
    """Creates a new AtomPubClient instance.

    Args:
      source: The name of your application.
      http_client: An object capable of performing HTTP requests through a
                   request method. This object is used to perform the request
                   when the AtomPubClient's request method is called. Used to
                   allow HTTP requests to be directed to a mock server, or use
                   an alternate library instead of the default of httplib to
                   make HTTP requests.
      host: str The default host name to use if a host is not specified in the
            requested URI.
      auth_token: An object which sets the HTTP Authorization header when its
                  modify_request method is called.
    """
    self.http_client = http_client or atom.http_core.ProxiedHttpClient()
    if host is not None:
      self.host = host
    if auth_token is not None:
      self.auth_token = auth_token
    self.source = source

  def request(self, method=None, uri=None, auth_token=None,
              http_request=None, **kwargs):
    """Performs an HTTP request to the server indicated.

    Uses the http_client instance to make the request.

    Args:
      method: The HTTP method as a string, usually one of 'GET', 'POST',
              'PUT', or 'DELETE'
      uri: The URI desired as a string or atom.http_core.Uri.
      http_request: (optional) request object to start from; if omitted,
          modify_request creates a fresh one.
      auth_token: An authorization token object whose modify_request method
                  sets the HTTP Authorization header.

    Returns:
      The results of calling self.http_client.request. With the default
      http_client, this is an HTTP response object.
    """
    # Modify the request based on the AtomPubClient settings and parameters
    # passed in to the request.
    http_request = self.modify_request(http_request)
    if isinstance(uri, (str, unicode)):
      uri = atom.http_core.Uri.parse_uri(uri)
    if uri is not None:
      uri.modify_request(http_request)
    if isinstance(method, (str, unicode)):
      http_request.method = method
    # Any unrecognized arguments are assumed to be capable of modifying the
    # HTTP request.
    for name, value in kwargs.iteritems():
      if value is not None:
        value.modify_request(http_request)
    # Default to an http request if the protocol scheme is not set.
    if http_request.uri.scheme is None:
      http_request.uri.scheme = 'http'
    # Override scheme. Force requests over https.
    if self.ssl:
      http_request.uri.scheme = 'https'
    if http_request.uri.path is None:
      http_request.uri.path = '/'
    # Add the Authorization header at the very end. The Authorization header
    # value may need to be calculated using information in the request.
    if auth_token:
      auth_token.modify_request(http_request)
    elif self.auth_token:
      self.auth_token.modify_request(http_request)
    # Check to make sure there is a host in the http_request.
    if http_request.uri.host is None:
      raise MissingHost('No host provided in request %s %s' % (
          http_request.method, str(http_request.uri)))
    # Perform the fully specified request using the http_client instance.
    # Sends the request to the server and returns the server's response.
    return self.http_client.request(http_request)

  Request = request

  def get(self, uri=None, auth_token=None, http_request=None, **kwargs):
    """Performs a request using the GET method, returns an HTTP response."""
    return self.request(method='GET', uri=uri, auth_token=auth_token,
                        http_request=http_request, **kwargs)

  Get = get

  def post(self, uri=None, data=None, auth_token=None, http_request=None,
           **kwargs):
    """Sends data using the POST method, returns an HTTP response."""
    return self.request(method='POST', uri=uri, auth_token=auth_token,
                        http_request=http_request, data=data, **kwargs)

  Post = post

  def put(self, uri=None, data=None, auth_token=None, http_request=None,
          **kwargs):
    """Sends data using the PUT method, returns an HTTP response."""
    return self.request(method='PUT', uri=uri, auth_token=auth_token,
                        http_request=http_request, data=data, **kwargs)

  Put = put

  def delete(self, uri=None, auth_token=None, http_request=None, **kwargs):
    """Performs a request using the DELETE method, returns an HTTP response."""
    return self.request(method='DELETE', uri=uri, auth_token=auth_token,
                        http_request=http_request, **kwargs)

  Delete = delete

  def modify_request(self, http_request):
    """Changes the HTTP request before sending it to the server.

    Sets the User-Agent HTTP header and fills in the HTTP host portion
    of the URL if one was not included in the request (for this it uses
    the self.host member if one is set). This method is called in
    self.request.

    Args:
      http_request: An atom.http_core.HttpRequest() (optional) If one is
                    not provided, a new HttpRequest is instantiated.

    Returns:
      An atom.http_core.HttpRequest() with the User-Agent header set and
      if this client has a value in its host member, the host in the request
      URL is set.
    """
    if http_request is None:
      http_request = atom.http_core.HttpRequest()
    if self.host is not None and http_request.uri.host is None:
      http_request.uri.host = self.host
    # Set the user agent header for logging purposes.
    if self.source:
      http_request.headers['User-Agent'] = '%s gdata-py/2.0.12' % self.source
    else:
      http_request.headers['User-Agent'] = 'gdata-py/2.0.12'
    return http_request

  ModifyRequest = modify_request

545
python/atom/core.py Normal file
View File

@ -0,0 +1,545 @@
#!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from xml.dom.minidom import parseString as xmlString
except ImportError:
xmlString = None
# Default character encoding used when (de)serializing XML text content.
STRING_ENCODING = 'utf-8'
class XmlElement(object):
  """Represents an element node in an XML document.

  The text member is a UTF-8 encoded str or unicode.
  """
  # Qualified tag name ('{namespace}tag'), or a tuple of per-version
  # qnames; see _get_qname for the selection rules.
  _qname = None
  # Child elements which matched no parsing rule for this class.
  _other_elements = None
  # Attributes which matched no parsing rule for this class.
  _other_attributes = None
  # The rule set contains mappings for XML qnames to child members and the
  # appropriate member classes.
  _rule_set = None
  # Cached tuple of (member_name, member_type) pairs; see _list_xml_members.
  _members = None
  text = None

  def __init__(self, text=None, *args, **kwargs):
    # Compute the member list once per class (checked via __dict__ so a
    # subclass does not inherit its parent's cached list).
    if ('_members' not in self.__class__.__dict__
        or self.__class__._members is None):
      self.__class__._members = tuple(self.__class__._list_xml_members())
    for member_name, member_type in self.__class__._members:
      if member_name in kwargs:
        setattr(self, member_name, kwargs[member_name])
      else:
        # Repeating members default to a fresh list, others to None.
        if isinstance(member_type, list):
          setattr(self, member_name, [])
        else:
          setattr(self, member_name, None)
    self._other_elements = []
    self._other_attributes = {}
    if text is not None:
      self.text = text

  def _list_xml_members(cls):
    """Generator listing all members which are XML elements or attributes.

    The following members would be considered XML members:
      foo = 'abc' - indicates an XML attribute with the qname abc
      foo = SomeElement - indicates an XML child element
      foo = [AnElement] - indicates a repeating XML child element, each instance
          will be stored in a list in this member
      foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
          attribute which has different parsing rules in different versions of
          the protocol. Version 1 of the XML parsing rules will look for an
          attribute with the qname 'att1' but verion 2 of the parsing rules will
          look for a namespaced attribute with the local name of 'att2' and an
          XML namespace of 'http://example.com/namespace'.
    """
    members = []
    for pair in inspect.getmembers(cls):
      if not pair[0].startswith('_') and pair[0] != 'text':
        member_type = pair[1]
        if (isinstance(member_type, tuple) or isinstance(member_type, list)
            or isinstance(member_type, (str, unicode))
            or (inspect.isclass(member_type)
                and issubclass(member_type, XmlElement))):
          members.append(pair)
    return members

  _list_xml_members = classmethod(_list_xml_members)

  def _get_rules(cls, version):
    """Initializes the _rule_set for the class which is used when parsing XML.

    This method is used internally for parsing and generating XML for an
    XmlElement. It is not recommended that you call this method directly.

    Returns:
      A tuple containing the XML parsing rules for the appropriate version.

      The tuple looks like:
      (qname, {sub_element_qname: (member_name, member_class, repeating), ..},
       {attribute_qname: member_name})

      To give a couple of concrete example, the atom.data.Control _get_rules
      with version of 2 will return:
      ('{http://www.w3.org/2007/app}control',
       {'{http://www.w3.org/2007/app}draft': ('draft',
                                              <class 'atom.data.Draft'>,
                                              False)},
       {})
      Calling _get_rules with version 1 on gdata.data.FeedLink will produce:
      ('{http://schemas.google.com/g/2005}feedLink',
       {'{http://www.w3.org/2005/Atom}feed': ('feed',
                                              <class 'gdata.data.GDFeed'>,
                                              False)},
       {'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint',
        'rel': 'rel'})
    """
    # Initialize the _rule_set to make sure there is a slot available to store
    # the parsing rules for this version of the XML schema.
    # Look for rule set in the class __dict__ proxy so that only the
    # _rule_set for this class will be found. By using the dict proxy
    # we avoid finding rule_sets defined in superclasses.
    # The four lines below provide support for any number of versions, but it
    # runs a bit slower then hard coding slots for two versions, so I'm using
    # the below two lines.
    #if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
    #  cls._rule_set = []
    #while len(cls.__dict__['_rule_set']) < version:
    #  cls._rule_set.append(None)
    # If there is no rule set cache in the class, provide slots for two XML
    # versions. If and when there is a version 3, this list will need to be
    # expanded.
    if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
      cls._rule_set = [None, None]
    # If a version higher than 2 is requested, fall back to version 2 because
    # 2 is currently the highest supported version.
    if version > 2:
      return cls._get_rules(2)
    # Check the dict proxy for the rule set to avoid finding any rule sets
    # which belong to the superclass. We only want rule sets for this class.
    if cls._rule_set[version-1] is None:
      # The rule set for each version consists of the qname for this element
      # ('{namespace}tag'), a dictionary (elements) for looking up the
      # corresponding class member when given a child element's qname, and a
      # dictionary (attributes) for looking up the corresponding class member
      # when given an XML attribute's qname.
      elements = {}
      attributes = {}
      if ('_members' not in cls.__dict__ or cls._members is None):
        cls._members = tuple(cls._list_xml_members())
      for member_name, target in cls._members:
        if isinstance(target, list):
          # This member points to a repeating element.
          elements[_get_qname(target[0], version)] = (member_name, target[0],
              True)
        elif isinstance(target, tuple):
          # This member points to a versioned XML attribute.
          if version <= len(target):
            attributes[target[version-1]] = member_name
          else:
            attributes[target[-1]] = member_name
        elif isinstance(target, (str, unicode)):
          # This member points to an XML attribute.
          attributes[target] = member_name
        elif issubclass(target, XmlElement):
          # This member points to a single occurance element.
          elements[_get_qname(target, version)] = (member_name, target, False)
      version_rules = (_get_qname(cls, version), elements, attributes)
      cls._rule_set[version-1] = version_rules
      return version_rules
    else:
      return cls._rule_set[version-1]

  _get_rules = classmethod(_get_rules)

  def get_elements(self, tag=None, namespace=None, version=1):
    """Find all sub elements which match the tag and namespace.

    To find all elements in this object, call get_elements with the tag and
    namespace both set to None (the default). This method searches through
    the object's members and the elements stored in _other_elements which
    did not match any of the XML parsing rules for this class.

    Args:
      tag: str
      namespace: str
      version: int Specifies the version of the XML rules to be used when
               searching for matching elements.

    Returns:
      A list of the matching XmlElements.
    """
    matches = []
    ignored1, elements, ignored2 = self.__class__._get_rules(version)
    if elements:
      for qname, element_def in elements.iteritems():
        member = getattr(self, element_def[0])
        if member:
          if _qname_matches(tag, namespace, qname):
            if element_def[2]:
              # If this is a repeating element, copy all instances into the
              # result list.
              matches.extend(member)
            else:
              matches.append(member)
    for element in self._other_elements:
      if _qname_matches(tag, namespace, element._qname):
        matches.append(element)
    return matches

  GetElements = get_elements
  # FindExtensions and FindChildren are provided for backwards compatibility
  # to the atom.AtomBase class.
  # However, FindExtensions may return more results than the v1 atom.AtomBase
  # method does, because get_elements searches both the expected children
  # and the unexpected "other elements". The old AtomBase.FindExtensions
  # method searched only "other elements" AKA extension_elements.
  FindExtensions = get_elements
  FindChildren = get_elements

  def get_attributes(self, tag=None, namespace=None, version=1):
    """Find all attributes which match the tag and namespace.

    To find all attributes in this object, call get_attributes with the tag
    and namespace both set to None (the default). This method searches
    through the object's members and the attributes stored in
    _other_attributes which did not fit any of the XML parsing rules for this
    class.

    Args:
      tag: str
      namespace: str
      version: int Specifies the version of the XML rules to be used when
               searching for matching attributes.

    Returns:
      A list of XmlAttribute objects for the matching attributes.
    """
    matches = []
    ignored1, ignored2, attributes = self.__class__._get_rules(version)
    if attributes:
      for qname, attribute_def in attributes.iteritems():
        if isinstance(attribute_def, (list, tuple)):
          attribute_def = attribute_def[0]
        member = getattr(self, attribute_def)
        # TODO: ensure this hasn't broken existing behavior.
        #member = getattr(self, attribute_def[0])
        if member:
          if _qname_matches(tag, namespace, qname):
            matches.append(XmlAttribute(qname, member))
    for qname, value in self._other_attributes.iteritems():
      if _qname_matches(tag, namespace, qname):
        matches.append(XmlAttribute(qname, value))
    return matches

  GetAttributes = get_attributes

  def _harvest_tree(self, tree, version=1):
    """Populates object members from the data in the tree Element."""
    qname, elements, attributes = self.__class__._get_rules(version)
    for element in tree:
      if elements and element.tag in elements:
        definition = elements[element.tag]
        # If this is a repeating element, make sure the member is set to a
        # list.
        if definition[2]:
          if getattr(self, definition[0]) is None:
            setattr(self, definition[0], [])
          getattr(self, definition[0]).append(_xml_element_from_tree(element,
              definition[1], version))
        else:
          setattr(self, definition[0], _xml_element_from_tree(element,
              definition[1], version))
      else:
        # Unrecognized child: keep it so it round-trips on output.
        self._other_elements.append(_xml_element_from_tree(element, XmlElement,
                                                           version))
    for attrib, value in tree.attrib.iteritems():
      if attributes and attrib in attributes:
        setattr(self, attributes[attrib], value)
      else:
        self._other_attributes[attrib] = value
    if tree.text:
      self.text = tree.text

  def _to_tree(self, version=1, encoding=None):
    # Build an ElementTree.Element representing this object.
    new_tree = ElementTree.Element(_get_qname(self, version))
    self._attach_members(new_tree, version, encoding)
    return new_tree

  def _attach_members(self, tree, version=1, encoding=None):
    """Convert members to XML elements/attributes and add them to the tree.

    Args:
      tree: An ElementTree.Element which will be modified. The members of
            this object will be added as child elements or attributes
            according to the rules described in _expected_elements and
            _expected_attributes. The elements and attributes stored in
            other_attributes and other_elements are also added a children
            of this tree.
      version: int Ingnored in this method but used by VersionedElement.
      encoding: str (optional)
    """
    qname, elements, attributes = self.__class__._get_rules(version)
    encoding = encoding or STRING_ENCODING
    # Add the expected elements and attributes to the tree.
    if elements:
      for tag, element_def in elements.iteritems():
        member = getattr(self, element_def[0])
        # If this is a repeating element and there are members in the list.
        if member and element_def[2]:
          for instance in member:
            instance._become_child(tree, version)
        elif member:
          member._become_child(tree, version)
    if attributes:
      for attribute_tag, member_name in attributes.iteritems():
        value = getattr(self, member_name)
        if value:
          tree.attrib[attribute_tag] = value
    # Add the unexpected (other) elements and attributes to the tree.
    for element in self._other_elements:
      element._become_child(tree, version)
    for key, value in self._other_attributes.iteritems():
      # I'm not sure if unicode can be used in the attribute name, so for now
      # we assume the encoding is correct for the attribute name.
      if not isinstance(value, unicode):
        value = value.decode(encoding)
      tree.attrib[key] = value
    if self.text:
      if isinstance(self.text, unicode):
        tree.text = self.text
      else:
        tree.text = self.text.decode(encoding)

  def to_string(self, version=1, encoding=None, pretty_print=None):
    """Converts this object to XML."""
    tree_string = ElementTree.tostring(self._to_tree(version, encoding))
    if pretty_print and xmlString is not None:
      return xmlString(tree_string).toprettyxml()
    return tree_string

  ToString = to_string

  def __str__(self):
    return self.to_string()

  def _become_child(self, tree, version=1):
    """Adds a child element to tree with the XML data in self."""
    new_child = ElementTree.Element('')
    tree.append(new_child)
    new_child.tag = _get_qname(self, version)
    self._attach_members(new_child, version)

  def __get_extension_elements(self):
    return self._other_elements

  def __set_extension_elements(self, elements):
    self._other_elements = elements

  extension_elements = property(__get_extension_elements,
      __set_extension_elements,
      """Provides backwards compatibility for v1 atom.AtomBase classes.""")

  def __get_extension_attributes(self):
    return self._other_attributes

  def __set_extension_attributes(self, attributes):
    self._other_attributes = attributes

  extension_attributes = property(__get_extension_attributes,
      __set_extension_attributes,
      """Provides backwards compatibility for v1 atom.AtomBase classes.""")

  def _get_tag(self, version=1):
    # Local tag name, i.e. the qname with any '{namespace}' prefix removed.
    qname = _get_qname(self, version)
    return qname[qname.find('}')+1:]

  def _get_namespace(self, version=1):
    # Namespace URI extracted from '{namespace}tag', or None if unqualified.
    qname = _get_qname(self, version)
    if qname.startswith('{'):
      return qname[1:qname.find('}')]
    else:
      return None

  def _set_tag(self, tag):
    if isinstance(self._qname, tuple):
      self._qname = self._qname.copy()
      if self._qname[0].startswith('{'):
        self._qname[0] = '{%s}%s' % (self._get_namespace(1), tag)
      else:
        self._qname[0] = tag
    else:
      if self._qname.startswith('{'):
        self._qname = '{%s}%s' % (self._get_namespace(), tag)
      else:
        self._qname = tag

  def _set_namespace(self, namespace):
    if isinstance(self._qname, tuple):
      self._qname = self._qname.copy()
      if namespace:
        self._qname[0] = '{%s}%s' % (namespace, self._get_tag(1))
      else:
        self._qname[0] = self._get_tag(1)
    else:
      if namespace:
        self._qname = '{%s}%s' % (namespace, self._get_tag(1))
      else:
        self._qname = self._get_tag(1)

  tag = property(_get_tag, _set_tag,
      """Provides backwards compatibility for v1 atom.AtomBase classes.""")

  namespace = property(_get_namespace, _set_namespace,
      """Provides backwards compatibility for v1 atom.AtomBase classes.""")

  # Provided for backwards compatibility to atom.ExtensionElement
  children = extension_elements
  attributes = extension_attributes
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
expected_tag: string
expected_namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
def parse(xml_string, target_class=None, version=1, encoding=None):
  """Parses the XML string according to the rules for the target_class.

  Args:
    xml_string: str or unicode XML document.
    target_class: XmlElement or a subclass. If None is specified, the
        XmlElement class is used.
    version: int (optional) The version of the schema which should be used
        when converting the XML into an object. The default is 1.
    encoding: str (optional) The character encoding of the bytes in the
        xml_string. Default is 'UTF-8'.
  """
  if target_class is None:
    target_class = XmlElement
  if isinstance(xml_string, unicode):
    # ElementTree wants a byte string, so encode unicode input first.
    if encoding is None:
      encoding = STRING_ENCODING
    xml_string = xml_string.encode(encoding)
  return _xml_element_from_tree(ElementTree.fromstring(xml_string),
                                target_class, version)


# Backwards-compatible aliases for the parse entry point.
Parse = parse
xml_element_from_string = parse
XmlElementFromString = xml_element_from_string
def _xml_element_from_tree(tree, target_class, version=1):
  """Builds a target_class instance from a parsed ElementTree element.

  A target_class with no fixed _qname accepts any tag; otherwise the
  tree's tag must equal the class qname for the requested version.
  Returns None when the tag does not match.
  """
  if target_class._qname is None:
    # Anonymous element class: adopt whatever tag the tree carries.
    instance = target_class()
    instance._qname = tree.tag
    instance._harvest_tree(tree, version)
    return instance
  # TODO handle the namespace-only case
  # Namespace only will be used with Google Spreadsheets rows and
  # Google Base item attributes.
  if tree.tag == _get_qname(target_class, version):
    instance = target_class()
    instance._harvest_tree(tree, version)
    return instance
  return None
class XmlAttribute(object):
  """A wrapper pairing an XML attribute's qualified name with its value."""

  def __init__(self, qname, value):
    # Qualified name, e.g. '{namespace}localname' or a plain 'name'.
    self._qname = qname
    # The attribute's text value.
    self.value = value

340
python/atom/data.py Normal file
View File

@ -0,0 +1,340 @@
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'

import atom.core

# Format strings used to build Clark-notation qualified names
# ('{namespace}tag') for the namespaces used by the classes below.
XML_TEMPLATE = '{http://www.w3.org/XML/1998/namespace}%s'
ATOM_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
# The Atom Publishing Protocol namespace changed between the pre-RFC
# draft (v1) and the final RFC 5023 (v2); both are kept so elements can
# declare a (v1, v2) qname tuple.
APP_TEMPLATE_V1 = '{http://purl.org/atom/app#}%s'
APP_TEMPLATE_V2 = '{http://www.w3.org/2007/app}%s'
class Name(atom.core.XmlElement):
  """The atom:name element."""
  _qname = ATOM_TEMPLATE % 'name'


class Email(atom.core.XmlElement):
  """The atom:email element."""
  _qname = ATOM_TEMPLATE % 'email'


class Uri(atom.core.XmlElement):
  """The atom:uri element."""
  _qname = ATOM_TEMPLATE % 'uri'


class Person(atom.core.XmlElement):
  """A foundation class which atom:author and atom:contributor extend.

  A person contains information like name, email address, and web page URI
  for an author or contributor to an Atom feed.
  """
  # Class attributes set to XmlElement subclasses declare child elements.
  name = Name
  email = Email
  uri = Uri


class Author(Person):
  """The atom:author element.

  An author is a required element in Feed unless each Entry contains an
  Author.
  """
  _qname = ATOM_TEMPLATE % 'author'


class Contributor(Person):
  """The atom:contributor element."""
  _qname = ATOM_TEMPLATE % 'contributor'


class Link(atom.core.XmlElement):
  """The atom:link element."""
  _qname = ATOM_TEMPLATE % 'link'
  # Class attributes set to plain strings declare XML attributes.
  href = 'href'
  rel = 'rel'
  type = 'type'
  hreflang = 'hreflang'
  title = 'title'
  length = 'length'


class Generator(atom.core.XmlElement):
  """The atom:generator element."""
  _qname = ATOM_TEMPLATE % 'generator'
  uri = 'uri'
  version = 'version'


class Text(atom.core.XmlElement):
  """A foundation class from which atom:title, summary, etc. extend.

  This class should never be instantiated.
  """
  type = 'type'


class Title(Text):
  """The atom:title element."""
  _qname = ATOM_TEMPLATE % 'title'


class Subtitle(Text):
  """The atom:subtitle element."""
  _qname = ATOM_TEMPLATE % 'subtitle'


class Rights(Text):
  """The atom:rights element."""
  _qname = ATOM_TEMPLATE % 'rights'


class Summary(Text):
  """The atom:summary element."""
  _qname = ATOM_TEMPLATE % 'summary'


class Content(Text):
  """The atom:content element."""
  _qname = ATOM_TEMPLATE % 'content'
  src = 'src'


class Category(atom.core.XmlElement):
  """The atom:category element."""
  _qname = ATOM_TEMPLATE % 'category'
  term = 'term'
  scheme = 'scheme'
  label = 'label'


class Id(atom.core.XmlElement):
  """The atom:id element."""
  _qname = ATOM_TEMPLATE % 'id'


class Icon(atom.core.XmlElement):
  """The atom:icon element."""
  _qname = ATOM_TEMPLATE % 'icon'


class Logo(atom.core.XmlElement):
  """The atom:logo element."""
  _qname = ATOM_TEMPLATE % 'logo'


class Draft(atom.core.XmlElement):
  """The app:draft element which indicates if this entry should be public."""
  # The APP namespace changed between the draft spec (v1) and RFC 5023
  # (v2), so the qname is a (v1, v2) tuple.
  _qname = (APP_TEMPLATE_V1 % 'draft', APP_TEMPLATE_V2 % 'draft')


class Control(atom.core.XmlElement):
  """The app:control element indicating restrictions on publication.

  The APP control element may contain a draft element indicating whether or
  not this entry should be publicly available.
  """
  _qname = (APP_TEMPLATE_V1 % 'control', APP_TEMPLATE_V2 % 'control')
  draft = Draft


class Date(atom.core.XmlElement):
  """A parent class for atom:updated, published, etc."""


class Updated(Date):
  """The atom:updated element."""
  _qname = ATOM_TEMPLATE % 'updated'


class Published(Date):
  """The atom:published element."""
  _qname = ATOM_TEMPLATE % 'published'
class LinkFinder(object):
  """An "interface" providing methods to find link elements.

  Entry elements often contain multiple links which differ in the rel
  attribute or content type. Often, developers are interested in a specific
  type of link so this class provides methods to find specific classes of
  links.

  This class is used as a mixin in Atom entries and feeds; it expects the
  host class to provide a 'link' list.
  """

  def get_link(self, rel):
    """Returns a link object which has the desired rel value.

    If you are interested in the URL instead of the link object,
    consider using find_url instead.
    """
    for candidate in self.link:
      if candidate.rel == rel and candidate.href:
        return candidate
    return None

  GetLink = get_link

  def find_url(self, rel):
    """Returns the URL in a link with the desired rel value."""
    # Same matching rule as get_link; just unwrap the href.
    match = self.get_link(rel)
    if match is None:
      return None
    return match.href

  FindUrl = find_url

  def find_self_link(self):
    """Find the first link with rel set to 'self'.

    Returns:
      A str containing the link's href or None if none of the links had rel
      equal to 'self'.
    """
    return self.find_url('self')

  FindSelfLink = find_self_link

  def get_self_link(self):
    return self.get_link('self')

  GetSelfLink = get_self_link

  def find_edit_link(self):
    return self.find_url('edit')

  FindEditLink = find_edit_link

  def get_edit_link(self):
    return self.get_link('edit')

  GetEditLink = get_edit_link

  def find_edit_media_link(self):
    found = self.find_url('edit-media')
    if found is None:
      # Fall back to media-edit since the Picasa API used that rel value
      # instead of edit-media.
      found = self.find_url('media-edit')
    return found

  FindEditMediaLink = find_edit_media_link

  def get_edit_media_link(self):
    found = self.get_link('edit-media')
    if found is None:
      # Same Picasa media-edit fallback as find_edit_media_link.
      found = self.get_link('media-edit')
    return found

  GetEditMediaLink = get_edit_media_link

  def find_next_link(self):
    return self.find_url('next')

  FindNextLink = find_next_link

  def get_next_link(self):
    return self.get_link('next')

  GetNextLink = get_next_link

  def find_license_link(self):
    return self.find_url('license')

  FindLicenseLink = find_license_link

  def get_license_link(self):
    return self.get_link('license')

  GetLicenseLink = get_license_link

  def find_alternate_link(self):
    return self.find_url('alternate')

  FindAlternateLink = find_alternate_link

  def get_alternate_link(self):
    return self.get_link('alternate')

  GetAlternateLink = get_alternate_link
class FeedEntryParent(atom.core.XmlElement, LinkFinder):
  """A super class for atom:feed and entry, contains shared attributes"""
  # Child element declarations; a [Class] list means the element repeats.
  author = [Author]
  category = [Category]
  contributor = [Contributor]
  id = Id
  link = [Link]
  rights = Rights
  title = Title
  updated = Updated

  def __init__(self, atom_id=None, text=None, *args, **kwargs):
    # Convenience: allows the atom:id to be passed as a constructor arg.
    if atom_id is not None:
      self.id = atom_id
    atom.core.XmlElement.__init__(self, text=text, *args, **kwargs)


class Source(FeedEntryParent):
  """The atom:source element."""
  _qname = ATOM_TEMPLATE % 'source'
  generator = Generator
  icon = Icon
  logo = Logo
  subtitle = Subtitle


class Entry(FeedEntryParent):
  """The atom:entry element."""
  _qname = ATOM_TEMPLATE % 'entry'
  content = Content
  published = Published
  source = Source
  summary = Summary
  control = Control


class Feed(Source):
  """The atom:feed element which contains entries."""
  _qname = ATOM_TEMPLATE % 'feed'
  entry = [Entry]
class ExtensionElement(atom.core.XmlElement):
  """Provided for backwards compatibility to the v1 atom.ExtensionElement."""

  def __init__(self, tag=None, namespace=None, attributes=None,
      children=None, text=None, *args, **kwargs):
    # Build a Clark-notation qname from the v1-style (namespace, tag) pair.
    if namespace:
      self._qname = '{%s}%s' % (namespace, tag)
    else:
      self._qname = tag
    # 'children' and 'attributes' are the v1 aliases for the v2 extension
    # element/attribute properties defined on atom.core.XmlElement.
    # NOTE(review): atom.core.XmlElement.__init__ is not invoked here —
    # presumably the assignments above cover all required state; confirm
    # before changing.
    self.children = children or []
    self.attributes = attributes or {}
    self.text = text

_BecomeChildElement = atom.core.XmlElement._become_child

318
python/atom/http.py Normal file
View File

@ -0,0 +1,318 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module make HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'

import types
import os
import httplib
import atom.url
import atom.http_interface
import socket
import base64
import atom.http_core

# ssl is only available when Python was built with OpenSSL support; the
# proxy code below falls back to the older socket.ssl API when it is
# missing (see ProxiedHttpClient._prepare_connection).
ssl_imported = False
ssl = None
try:
  import ssl
  ssl_imported = True
except ImportError:
  pass


class ProxyError(atom.http_interface.Error):
  """Raised when the CONNECT handshake with an HTTP proxy fails."""
  pass


class TestConfigurationError(Exception):
  """Raised by tests when required configuration is missing."""
  pass


# Content-Type used when the caller supplies a request body without one.
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
  # Added to allow old v1 HttpClient objects to use the new
  # http_core.HttpClient. Used in unit tests to inject a mock client.
  v2_http_client = None

  def __init__(self, headers=None):
    # debug=True turns on httplib's wire-level logging per connection.
    self.debug = False
    # Headers sent with every request; per-call headers are merged on top.
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform and HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.

    Returns:
      The httplib.HTTPResponse from the server.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      if isinstance(data, types.StringTypes):
        all_headers['Content-Length'] = str(len(data))
      else:
        raise atom.http_interface.ContentLengthRequired('Unable to calculate '
            'the length of the data parameter. Specify a value for '
            'Content-Length')

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE

    # When a v2 client was injected, translate this v1-style call into a
    # v2 HttpRequest and delegate the whole request to it.
    if self.v2_http_client is not None:
      http_request = atom.http_core.HttpRequest(method=operation)
      atom.http_core.Uri.parse_uri(str(url)).modify_request(http_request)
      http_request.headers = all_headers
      if data:
        http_request._body_parts.append(data)
      return self.v2_http_client.request(http_request=http_request)

    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringTypes):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')

    connection = self._prepare_connection(url, all_headers)

    if self.debug:
      connection.debuglevel = 1

    # skip_host=True because the Host header is written explicitly below,
    # including the port only when one was given.
    connection.putrequest(operation, self._get_access_url(url),
        skip_host=True)
    if url.port is not None:
      connection.putheader('Host', '%s:%s' % (url.host, url.port))
    else:
      connection.putheader('Host', url.host)

    # Overcome a bug in Python 2.4 and 2.5
    # httplib.HTTPConnection.putrequest adding
    # HTTP request header 'Host: www.google.com:443' instead of
    # 'Host: www.google.com', and thus resulting the error message
    # 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
    if (url.protocol == 'https' and int(url.port or 443) == 443 and
        hasattr(connection, '_buffer') and
        isinstance(connection._buffer, list)):
      header_line = 'Host: %s:443' % url.host
      replacement_header_line = 'Host: %s' % url.host
      try:
        connection._buffer[connection._buffer.index(header_line)] = (
            replacement_header_line)
      except ValueError:  # header_line missing from connection._buffer
        pass

    # Send the HTTP headers.
    for header_name in all_headers:
      connection.putheader(header_name, all_headers[header_name])
    connection.endheaders()

    # If there is data, send it in the request.
    if data:
      if isinstance(data, list):
        for data_part in data:
          _send_data_part(data_part, connection)
      else:
        _send_data_part(data, connection)

    # Return the HTTP Response from the server.
    return connection.getresponse()

  def _prepare_connection(self, url, headers):
    # Chooses an HTTP or HTTPS connection (with optional explicit port)
    # based on the url's protocol.
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringTypes):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    if url.protocol == 'https':
      if not url.port:
        return httplib.HTTPSConnection(url.host)
      return httplib.HTTPSConnection(url.host, int(url.port))
    else:
      if not url.port:
        return httplib.HTTPConnection(url.host)
      return httplib.HTTPConnection(url.host, int(url.port))

  def _get_access_url(self, url):
    # The request line carries the full URL string form.
    return url.to_string()
class ProxiedHttpClient(HttpClient):
  """Performs an HTTP request through a proxy.

  The proxy settings are obtained from enviroment variables. The URL of the
  proxy server is assumed to be stored in the environment variables
  'https_proxy' and 'http_proxy' respectively. If the proxy server requires
  a Basic Auth authorization header, the username and password are expected
  to be in the 'proxy-username' or 'proxy_username' variable and the
  'proxy-password' or 'proxy_password' variable.

  After connecting to the proxy server, the request is completed as in
  HttpClient.request.
  """

  def _prepare_connection(self, url, headers):
    """Opens a connection to the destination, tunneling via any proxy.

    HTTPS: issues a raw CONNECT to the proxy named by 'https_proxy',
    wraps the socket in SSL, and grafts it onto an HTTPConnection.
    HTTP: returns a connection to the proxy named by 'http_proxy' with a
    Proxy-Authorization header added when credentials are configured.
    Falls back to a direct connection when the relevant environment
    variable is unset.
    """
    proxy_auth = _get_proxy_auth()
    if url.protocol == 'https':
      # destination is https
      proxy = os.environ.get('https_proxy')
      if proxy:
        # Set any proxy auth headers
        if proxy_auth:
          proxy_auth = 'Proxy-authorization: %s' % proxy_auth
        # Construct the proxy connect command.
        port = url.port
        if not port:
          port = '443'
        proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
        # Set the user agent to send to the proxy
        if headers and 'User-Agent' in headers:
          user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
        else:
          user_agent = ''
        proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        # Connect to the proxy server, very simple recv and error checking
        p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        p_sock.connect((proxy_url.host, int(proxy_url.port)))
        p_sock.sendall(proxy_pieces)
        response = ''
        # Wait for the full response (headers end at the blank line).
        while response.find("\r\n\r\n") == -1:
          response += p_sock.recv(8192)
        # The status code is the second token of the proxy's status line.
        p_status = response.split()[1]
        if p_status != str(200):
          raise ProxyError('Error status=%s' % str(p_status))
        # Trivial setup for ssl socket.
        sslobj = None
        if ssl_imported:
          sslobj = ssl.wrap_socket(p_sock, None, None)
        else:
          # Pre-'ssl'-module fallback: socket.ssl plus httplib.FakeSocket.
          sock_ssl = socket.ssl(p_sock, None, None)
          sslobj = httplib.FakeSocket(p_sock, sock_ssl)
        # Initalize httplib and replace with the proxy socket.
        connection = httplib.HTTPConnection(proxy_url.host)
        connection.sock = sslobj
        return connection
      else:
        # The request was HTTPS, but there was no https_proxy set.
        return HttpClient._prepare_connection(self, url, headers)
    else:
      proxy = os.environ.get('http_proxy')
      if proxy:
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        if proxy_auth:
          headers['Proxy-Authorization'] = proxy_auth.strip()
        return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
      else:
        # The request was HTTP, but there was no http_proxy set.
        return HttpClient._prepare_connection(self, url, headers)

  def _get_access_url(self, url):
    # When tunneling through a proxy the request line must carry the
    # full URL.
    return url.to_string()
def _get_proxy_auth():
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.encodestring('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
def _send_data_part(data, connection):
  """Writes one request-body part to an open HTTP connection.

  Strings are sent as-is; file-like objects are streamed in 100K-byte
  chunks; anything else is converted with str() first.
  """
  if isinstance(data, types.StringTypes):
    connection.send(data)
  elif hasattr(data, 'read'):
    # Stream the file a chunk at a time to avoid reading it all into
    # memory; read() returns '' at end of file.
    while True:
      chunk = data.read(100000)
      if chunk == '':
        break
      connection.send(chunk)
  else:
    # Not a string and not a file: fall back to the str() representation.
    connection.send(str(data))

597
python/atom/http_core.py Normal file
View File

@ -0,0 +1,597 @@
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# TODO: add proxy handling.
__author__ = 'j.s@google.com (Jeff Scudder)'

import os
import StringIO
import urlparse
import urllib
import httplib

# ssl is unavailable on Python builds without OpenSSL; treat it as
# optional and leave the name bound to None when the import fails.
ssl = None
try:
  import ssl
except ImportError:
  pass


class Error(Exception):
  """Base exception for errors raised by this module."""
  pass


class UnknownSize(Error):
  """Raised when a body part's size is required but cannot be determined."""
  pass


class ProxyError(Error):
  """Raised when communication through an HTTP proxy fails."""
  pass


# Boundary marker separating the parts of a MIME multipart request body.
MIME_BOUNDARY = 'END_OF_PART'
def get_headers(http_response):
  """Retrieves all HTTP headers from an HTTP response from the server.

  This method is provided for backwards compatibility for Python2.2 and 2.3.
  The httplib.HTTPResponse object in 2.2 and 2.3 does not have a getheaders
  method so this function will use getheaders if available, but if not it
  will retrieve a few using getheader.
  """
  if hasattr(http_response, 'getheaders'):
    return http_response.getheaders()
  # Old responses only expose getheader(name), so probe a fixed list of
  # commonly used header names and keep the ones that are present.
  collected = []
  for name in (
      'location', 'content-type', 'content-length', 'age', 'allow',
      'cache-control', 'content-location', 'content-encoding', 'date',
      'etag', 'expires', 'last-modified', 'pragma', 'server',
      'set-cookie', 'transfer-encoding', 'vary', 'via', 'warning',
      'www-authenticate', 'gdata-version'):
    value = http_response.getheader(name, None)
    if value is not None:
      collected.append((name, value))
  return collected
class HttpRequest(object):
  """Contains all of the parameters for an HTTP 1.1 request.

  The HTTP headers are represented by a dictionary, and it is the
  responsibility of the user to ensure that duplicate field names are
  combined into one header value according to the rules in section 4.2 of
  RFC 2616.
  """
  method = None
  uri = None

  def __init__(self, uri=None, method=None, headers=None):
    """Construct an HTTP request.

    Args:
      uri: The full path or partial path as a Uri object or a string.
      method: The HTTP method for the request, examples include 'GET',
          'POST', etc.
      headers: dict of strings The HTTP headers to include in the request.
    """
    self.headers = headers or {}
    self._body_parts = []
    if method is not None:
      self.method = method
    if isinstance(uri, (str, unicode)):
      uri = Uri.parse_uri(uri)
    self.uri = uri or Uri()

  def add_body_part(self, data, mime_type, size=None):
    """Adds data to the HTTP request body.

    If more than one part is added, this is assumed to be a mime-multipart
    request. This method is designed to create MIME 1.0 requests as
    specified in RFC 1341.

    Args:
      data: str or a file-like object containing a part of the request body.
      mime_type: str The MIME type describing the data
      size: int Required if the data is a file like object. If the data is a
          string, the size is calculated so this parameter is ignored.
    """
    if isinstance(data, str):
      size = len(data)
    if size is None:
      # TODO: support chunked transfer if some of the body is of unknown size.
      raise UnknownSize('Each part of the body must have a known size.')
    # Continue accumulating on any Content-Length set by earlier parts.
    if 'Content-Length' in self.headers:
      content_length = int(self.headers['Content-Length'])
    else:
      content_length = 0
    # If this is the first part added to the body, then this is not a
    # multipart request.
    if len(self._body_parts) == 0:
      self.headers['Content-Type'] = mime_type
      content_length = size
      self._body_parts.append(data)
    elif len(self._body_parts) == 1:
      # This is the first member in a mime-multipart request, so change the
      # _body_parts list to indicate a multipart payload: a preamble, then
      # boundary/type markers around the existing part and the new part.
      self._body_parts.insert(0, 'Media multipart posting')
      boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
      content_length += len(boundary_string) + size
      self._body_parts.insert(1, boundary_string)
      content_length += len('Media multipart posting')
      # Put the content type of the first part of the body into the
      # multipart payload.
      original_type_string = 'Content-Type: %s\r\n\r\n' % (
          self.headers['Content-Type'],)
      self._body_parts.insert(2, original_type_string)
      content_length += len(original_type_string)
      boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
      self._body_parts.append(boundary_string)
      content_length += len(boundary_string)
      # Change the headers to indicate this is now a mime multipart request.
      self.headers['Content-Type'] = 'multipart/related; boundary="%s"' % (
          MIME_BOUNDARY,)
      self.headers['MIME-version'] = '1.0'
      # Include the mime type of this part.
      type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
      self._body_parts.append(type_string)
      content_length += len(type_string)
      self._body_parts.append(data)
      ending_boundary_string = '\r\n--%s--' % (MIME_BOUNDARY,)
      self._body_parts.append(ending_boundary_string)
      content_length += len(ending_boundary_string)
    else:
      # This is a mime multipart request: splice the new part in just
      # before the closing boundary (index -1).
      boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
      self._body_parts.insert(-1, boundary_string)
      content_length += len(boundary_string) + size
      # Include the mime type of this part.
      type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
      self._body_parts.insert(-1, type_string)
      content_length += len(type_string)
      self._body_parts.insert(-1, data)
    self.headers['Content-Length'] = str(content_length)

  # I could add an "append_to_body_part" method as well.
  AddBodyPart = add_body_part

  def add_form_inputs(self, form_data,
      mime_type='application/x-www-form-urlencoded'):
    """Form-encodes and adds data to the request body.

    Args:
      form_data: dict or sequence of two member tuples which contains the
          form keys and values.
      mime_type: str The MIME type of the form data being sent. Defaults
          to 'application/x-www-form-urlencoded'.
    """
    body = urllib.urlencode(form_data)
    self.add_body_part(body, mime_type)

  AddFormInputs = add_form_inputs

  def _copy(self):
    """Creates a deep copy of this request."""
    copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port,
        self.uri.path, self.uri.query.copy())
    new_request = HttpRequest(uri=copied_uri, method=self.method,
        headers=self.headers.copy())
    # Body parts may include file-like objects, which are shared (not
    # copied) between the two requests.
    new_request._body_parts = self._body_parts[:]
    return new_request

  def _dump(self):
    """Converts to a printable string for debugging purposes.

    In order to preserve the request, it does not read from file-like
    objects in the body.
    """
    output = 'HTTP Request\n method: %s\n url: %s\n headers:\n' % (
        self.method, str(self.uri))
    for header, value in self.headers.iteritems():
      output += ' %s: %s\n' % (header, value)
    output += ' body sections:\n'
    i = 0
    for part in self._body_parts:
      if isinstance(part, (str, unicode)):
        output += ' %s: %s\n' % (i, part)
      else:
        output += ' %s: <file like object>\n' % i
      i += 1
    return output
def _apply_defaults(http_request):
if http_request.uri.scheme is None:
if http_request.uri.port == 443:
http_request.uri.scheme = 'https'
else:
http_request.uri.scheme = 'http'
class Uri(object):
  """A URI as used in HTTP 1.1"""
  scheme = None
  host = None
  port = None
  path = None

  def __init__(self, scheme=None, host=None, port=None, path=None, query=None):
    """Constructor for a URI.

    Args:
      scheme: str This is usually 'http' or 'https'.
      host: str The host name or IP address of the desired server.
      port: int The server's port number.
      path: str The path of the resource following the host. This begins
          with a /, example: '/calendar/feeds/default/allcalendars/full'
      query: dict of strings The URL query parameters. The keys and values
          are both escaped so this dict should contain the unescaped values.
          For example {'my key': 'val', 'second': '!!!'} will become
          '?my+key=val&second=%21%21%21' which is appended to the path.
    """
    self.query = query or {}
    if scheme is not None:
      self.scheme = scheme
    if host is not None:
      self.host = host
    if port is not None:
      self.port = port
    if path:
      self.path = path

  def _get_query_string(self):
    # Escapes and joins query parameters as 'key=value' pairs with '&'.
    # NOTE(review): dict iteration order determines parameter order, so
    # the rendered order is not guaranteed to be stable.
    param_pairs = []
    for key, value in self.query.iteritems():
      param_pairs.append('='.join((urllib.quote_plus(key),
          urllib.quote_plus(str(value)))))
    return '&'.join(param_pairs)

  def _get_relative_path(self):
    """Returns the path with the query parameters escaped and appended."""
    param_string = self._get_query_string()
    if self.path is None:
      path = '/'
    else:
      path = self.path
    if param_string:
      return '?'.join([path, param_string])
    else:
      return path

  def _to_string(self):
    # Renders the full URI; an unset scheme is inferred from the port
    # (443 -> https, anything else -> http).
    if self.scheme is None and self.port == 443:
      scheme = 'https'
    elif self.scheme is None:
      scheme = 'http'
    else:
      scheme = self.scheme
    if self.path is None:
      path = '/'
    else:
      path = self.path
    if self.port is None:
      return '%s://%s%s' % (scheme, self.host, self._get_relative_path())
    else:
      return '%s://%s:%s%s' % (scheme, self.host, str(self.port),
          self._get_relative_path())

  def __str__(self):
    return self._to_string()

  def modify_request(self, http_request=None):
    """Sets HTTP request components based on the URI."""
    if http_request is None:
      http_request = HttpRequest()
    if http_request.uri is None:
      http_request.uri = Uri()
    # Determine the correct scheme.
    if self.scheme:
      http_request.uri.scheme = self.scheme
    if self.port:
      http_request.uri.port = self.port
    if self.host:
      http_request.uri.host = self.host
    # Set the relative uri path
    if self.path:
      http_request.uri.path = self.path
    if self.query:
      http_request.uri.query = self.query.copy()
    return http_request

  ModifyRequest = modify_request

  def parse_uri(uri_string):
    """Creates a Uri object which corresponds to the URI string.

    This method can accept partial URIs, but it will leave missing
    members of the Uri unset.
    """
    parts = urlparse.urlparse(uri_string)
    uri = Uri()
    if parts[0]:
      uri.scheme = parts[0]
    if parts[1]:
      # Split an explicit ':port' off of the network location if present.
      host_parts = parts[1].split(':')
      if host_parts[0]:
        uri.host = host_parts[0]
      if len(host_parts) > 1:
        uri.port = int(host_parts[1])
    if parts[2]:
      uri.path = parts[2]
    if parts[4]:
      # Unescape the query string into the query dict; a bare key with no
      # '=' is stored with a None value.
      param_pairs = parts[4].split('&')
      for pair in param_pairs:
        pair_parts = pair.split('=')
        if len(pair_parts) > 1:
          uri.query[urllib.unquote_plus(pair_parts[0])] = (
              urllib.unquote_plus(pair_parts[1]))
        elif len(pair_parts) == 1:
          uri.query[urllib.unquote_plus(pair_parts[0])] = None
    return uri

  parse_uri = staticmethod(parse_uri)

  ParseUri = parse_uri


# Module-level aliases for the static parse_uri helper.
parse_uri = Uri.parse_uri
ParseUri = Uri.parse_uri
class HttpResponse(object):
  """A simple HTTP response: status, reason, headers, and a readable body."""
  status = None
  reason = None
  _body = None

  def __init__(self, status=None, reason=None, headers=None, body=None):
    """Stores the response fields, wrapping a string body so it is readable.

    Args:
      status: int HTTP status code.
      reason: str HTTP reason phrase.
      headers: dict of header name to value.
      body: str or a file-like object holding the response body.
    """
    self._headers = headers or {}
    if status is not None:
      self.status = status
    if reason is not None:
      self.reason = reason
    if body is None:
      return
    if hasattr(body, 'read'):
      # Already file-like: keep it so reads stream from the source.
      self._body = body
    else:
      self._body = StringIO.StringIO(body)

  def getheader(self, name, default=None):
    """Returns the value of the named header, or default when absent."""
    if name not in self._headers:
      return default
    return self._headers[name]

  def getheaders(self):
    """Returns the header mapping as stored."""
    return self._headers

  def read(self, amt=None):
    """Reads up to amt bytes of the body (everything when amt is falsy)."""
    if self._body is None:
      return None
    if amt:
      return self._body.read(amt)
    return self._body.read()
def _dump_response(http_response):
  """Converts to a string for printing debug messages.

  Does not read the body since that may consume the content.
  """
  lines = ['HttpResponse\n status: %s\n reason: %s\n headers:' % (
      http_response.status, http_response.reason)]
  headers = get_headers(http_response)
  if isinstance(headers, dict):
    # Mapping form: iterate name/value items directly.
    for name, value in headers.iteritems():
      lines.append(' %s: %s\n' % (name, value))
  else:
    # Sequence form: each entry is a (name, value) pair.
    for pair in headers:
      lines.append(' %s: %s\n' % (pair[0], pair[1]))
  return ''.join(lines)
class HttpClient(object):
  """Performs HTTP requests using httplib."""
  # When truthy, httplib's debuglevel is enabled on each connection.
  debug = None

  def request(self, http_request):
    """Sends the given HttpRequest and returns the httplib response."""
    return self._http_request(http_request.method, http_request.uri,
                              http_request.headers, http_request._body_parts)

  # CamelCase alias kept for backwards compatibility with older callers.
  Request = request

  def _get_connection(self, uri, headers=None):
    """Opens a socket connection to the server to set up an HTTP request.

    Args:
      uri: The full URL for the request as a Uri object.
      headers: A dict of string pairs containing the HTTP headers for the
          request.
    """
    connection = None
    if uri.scheme == 'https':
      if not uri.port:
        # No explicit port: httplib uses the default HTTPS port.
        connection = httplib.HTTPSConnection(uri.host)
      else:
        connection = httplib.HTTPSConnection(uri.host, int(uri.port))
    else:
      if not uri.port:
        # No explicit port: httplib uses the default HTTP port.
        connection = httplib.HTTPConnection(uri.host)
      else:
        connection = httplib.HTTPConnection(uri.host, int(uri.port))
    return connection

  def _http_request(self, method, uri, headers=None, body_parts=None):
    """Makes an HTTP request using httplib.

    Args:
      method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc.
      uri: str or atom.http_core.Uri
      headers: dict of strings mapping to strings which will be sent as HTTP
          headers in the request.
      body_parts: list of strings, objects with a read method, or objects
          which can be converted to strings using str. Each of these
          will be sent in order as the body of the HTTP request.
    """
    if isinstance(uri, (str, unicode)):
      uri = Uri.parse_uri(uri)
    connection = self._get_connection(uri, headers=headers)
    if self.debug:
      connection.debuglevel = 1
    if connection.host != uri.host:
      # Connection host differs from the target host (e.g. a proxy), so the
      # request line must carry the absolute URL.
      connection.putrequest(method, str(uri))
    else:
      connection.putrequest(method, uri._get_relative_path())
    # Overcome a bug in Python 2.4 and 2.5
    # httplib.HTTPConnection.putrequest adding
    # HTTP request header 'Host: www.google.com:443' instead of
    # 'Host: www.google.com', and thus resulting the error message
    # 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
    if (uri.scheme == 'https' and int(uri.port or 443) == 443 and
        hasattr(connection, '_buffer') and
        isinstance(connection._buffer, list)):
      header_line = 'Host: %s:443' % uri.host
      replacement_header_line = 'Host: %s' % uri.host
      try:
        # Patch the already-buffered request line in place.
        connection._buffer[connection._buffer.index(header_line)] = (
            replacement_header_line)
      except ValueError:  # header_line missing from connection._buffer
        pass
    # Send the HTTP headers.
    for header_name, value in headers.iteritems():
      connection.putheader(header_name, value)
    connection.endheaders()
    # If there is data, send it in the request.
    if body_parts:
      for part in body_parts:
        _send_data_part(part, connection)
    # Return the HTTP Response from the server.
    return connection.getresponse()
def _send_data_part(data, connection):
  """Writes a single body part (string, file-like, or object) to connection."""
  if isinstance(data, (str, unicode)):
    # I might want to just allow str, not unicode.
    connection.send(data)
  elif hasattr(data, 'read'):
    # File-like object: stream it out a chunk at a time.
    while 1:
      chunk = data.read(100000)
      if chunk == '':
        break
      connection.send(chunk)
  else:
    # The data object was not a file; fall back to its string form.
    connection.send(str(data))
class ProxiedHttpClient(HttpClient):
  """HttpClient that routes requests through proxies named in os.environ.

  Honors the https_proxy/http_proxy environment variables: https requests
  tunnel through the proxy with a CONNECT command, http requests connect
  to the proxy directly.
  """

  def _get_connection(self, uri, headers=None):
    # Check to see if there are proxy settings required for this request.
    proxy = None
    if uri.scheme == 'https':
      proxy = os.environ.get('https_proxy')
    elif uri.scheme == 'http':
      proxy = os.environ.get('http_proxy')
    if not proxy:
      # No proxy configured: fall back to a direct connection.
      return HttpClient._get_connection(self, uri, headers=headers)
    # Now we have the URL of the appropriate proxy server.
    # Get a username and password for the proxy if required.
    proxy_auth = _get_proxy_auth()
    if uri.scheme == 'https':
      import socket
      if proxy_auth:
        proxy_auth = 'Proxy-authorization: %s' % proxy_auth
    # Construct the proxy connect command.
      port = uri.port
      if not port:
        port = 443
      proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (uri.host, port)
      # Set the user agent to send to the proxy
      user_agent = ''
      if headers and 'User-Agent' in headers:
        user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
      proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
      # Find the proxy host and port.
      proxy_uri = Uri.parse_uri(proxy)
      if not proxy_uri.port:
        proxy_uri.port = '80'
      # Connect to the proxy server, very simple recv and error checking
      p_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      p_sock.connect((proxy_uri.host, int(proxy_uri.port)))
      p_sock.sendall(proxy_pieces)
      response = ''
      # Wait for the full response.
      while response.find("\r\n\r\n") == -1:
        response += p_sock.recv(8192)
      p_status = response.split()[1]
      if p_status != str(200):
        raise ProxyError('Error status=%s' % str(p_status))
      # Trivial setup for ssl socket.
      sslobj = None
      if ssl is not None:
        sslobj = ssl.wrap_socket(p_sock, None, None)
      else:
        # BUG FIX: this call previously passed a garbled third argument
        # ('Nonesock_'), which raised a NameError at runtime. socket.ssl
        # takes (sock, keyfile, certfile).
        sock_ssl = socket.ssl(p_sock, None, None)
        sslobj = httplib.FakeSocket(p_sock, sock_ssl)
      # Initalize httplib and replace with the proxy socket.
      connection = httplib.HTTPConnection(proxy_uri.host)
      connection.sock = sslobj
      return connection
    elif uri.scheme == 'http':
      proxy_uri = Uri.parse_uri(proxy)
      if not proxy_uri.port:
        proxy_uri.port = '80'
      if proxy_auth:
        headers['Proxy-Authorization'] = proxy_auth.strip()
      return httplib.HTTPConnection(proxy_uri.host, int(proxy_uri.port))
    # Unknown scheme: no connection can be built.
    return None
def _get_proxy_auth():
import base64
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.b64encode('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''

View File

@ -0,0 +1,156 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a common interface for all HTTP requests.
HttpResponse: Represents the server's response to an HTTP request. Provides
an interface identical to httplib.HTTPResponse which is the response
expected from higher level classes which use HttpClient.request.
GenericHttpClient: Provides an interface (superclass) for an object
responsible for making HTTP requests. Subclasses of this object are
used in AtomService and GDataService to make requests to the server. By
changing the http_client member object, the AtomService is able to make
HTTP requests using different logic (for example, when running on
Google App Engine, the http_client makes requests using the App Engine
urlfetch API).
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
USER_AGENT = '%s GData-Python/2.0.12'
class Error(Exception):
  """Base exception for errors raised by this HTTP interface module."""
  pass
class UnparsableUrlObject(Error):
  """Raised when a URL value cannot be parsed into its components."""
  pass
class ContentLengthRequired(Error):
  """Raised when a request requires a Content-Length header but none is set."""
  pass
class HttpResponse(object):
  """Mirrors httplib.HTTPResponse for responses handled by HttpClient."""

  def __init__(self, body=None, status=None, reason=None, headers=None):
    """Constructor for an HttpResponse object.

    HttpResponse represents the server's response to an HTTP request from
    the client. The HttpClient.request method returns a httplib.HTTPResponse
    object and this HttpResponse class is designed to mirror the interface
    exposed by httplib.HTTPResponse.

    Args:
      body: A file like object, with a read() method. The body could also
          be a string, and the constructor will wrap it so that
          HttpResponse.read(self) will return the full string.
      status: The HTTP status code as an int. Example: 200, 201, 404.
      reason: The HTTP status message which follows the code. Example:
          OK, Created, Not Found
      headers: A dictionary containing the HTTP headers in the server's
          response. A common header in the response is Content-Length.
    """
    if body:
      if hasattr(body, 'read'):
        self._body = body
      else:
        self._body = StringIO.StringIO(body)
    else:
      self._body = None
    if status is not None:
      self.status = int(status)
    else:
      self.status = None
    self.reason = reason
    self._headers = headers or {}

  def getheader(self, name, default=None):
    """Returns the value of the named header, or default when absent."""
    if name in self._headers:
      return self._headers[name]
    else:
      return default

  def read(self, amt=None):
    """Reads amt bytes of the body, or the whole body when amt is falsy.

    Returns None when the response has no body; previously this raised
    AttributeError, which was inconsistent with HttpResponse.read in
    atom.http_core (that implementation returns None for a missing body).
    """
    if self._body is None:
      return None
    if not amt:
      return self._body.read()
    else:
      return self._body.read(amt)
class GenericHttpClient(object):
  """Delegates HTTP requests to a contained http_client object.

  Persistent headers stored on this object are merged into every outgoing
  request before it is passed through.
  """
  debug = False

  def __init__(self, http_client, headers=None):
    """
    Args:
      http_client: An object which provides a request method to make an HTTP
          request. The request method in GenericHttpClient performs a
          call-through to the contained HTTP client object.
      headers: A dictionary containing HTTP headers which should be included
          in every HTTP request. Common persistent headers include
          'User-Agent'.
    """
    self.http_client = http_client
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Merges per-call headers over the persistent ones, then delegates."""
    merged_headers = self.headers.copy()
    if headers:
      merged_headers.update(headers)
    return self.http_client.request(operation, url, data=data,
                                    headers=merged_headers)

  def get(self, url, headers=None):
    """Performs an HTTP GET."""
    return self.request('GET', url, headers=headers)

  def post(self, url, data, headers=None):
    """Performs an HTTP POST with the given body data."""
    return self.request('POST', url, data=data, headers=headers)

  def put(self, url, data, headers=None):
    """Performs an HTTP PUT with the given body data."""
    return self.request('PUT', url, data=data, headers=headers)

  def delete(self, url, headers=None):
    """Performs an HTTP DELETE."""
    return self.request('DELETE', url, headers=headers)
class GenericToken(object):
  """Represents an Authorization token to be added to HTTP requests.

  Some Authorization headers included calculated fields (digital
  signatures for example) which are based on the parameters of the HTTP
  request. Therefore the token is responsible for signing the request
  and adding the Authorization header.
  """

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """For the GenericToken, no Authorization token is set."""
    # Pure pass-through: subclasses override this to sign the request.
    return http_client.request(operation, url, data=data, headers=headers)

  def valid_for_scope(self, url):
    """Tells the caller if the token authorizes access to the desired URL.

    Since the generic token doesn't add an auth header, it is not valid for
    any scope.
    """
    return False

132
python/atom/mock_http.py Normal file
View File

@ -0,0 +1,132 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
class Error(Exception):
  """Base exception for errors raised by this mock HTTP module."""
  pass
class NoRecordingFound(Error):
  """Raised when no stored recording matches the incoming request."""
  pass
class MockRequest(object):
  """Holds parameters of an HTTP request for matching against future requests.
  """

  def __init__(self, operation, url, data=None, headers=None):
    # operation is the HTTP method string: 'GET', 'POST', etc.
    self.operation = operation
    # Normalize string URLs into parsed atom.url objects so comparisons in
    # MockHttpClient.request compare parsed URLs rather than raw strings.
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    self.url = url
    # data and headers are stored but currently ignored when matching.
    self.data = data
    self.headers = headers
class MockResponse(atom.http_interface.HttpResponse):
  """Simulates an httplib.HTTPResponse object."""

  def __init__(self, body=None, status=None, reason=None, headers=None):
    # A file-like body is fully read up front and kept as a string so that
    # read() can be called repeatedly.
    if body and hasattr(body, 'read'):
      self.body = body.read()
    else:
      self.body = body
    if status is None:
      self.status = None
    else:
      self.status = int(status)
    self.reason = reason
    self._headers = headers or {}

  def read(self):
    """Returns the stored body; repeatable, unlike a real response."""
    return self.body
class MockHttpClient(atom.http_interface.GenericHttpClient):
  """Replays stored request-response pairs, optionally recording live ones."""

  def __init__(self, headers=None, recordings=None, real_client=None):
    """An HttpClient which responds to request with stored data.

    The request-response pairs are stored as tuples in a member list named
    recordings.

    The MockHttpClient can be switched from replay mode to record mode by
    setting the real_client member to an instance of an HttpClient which will
    make real HTTP requests and store the server's response in list of
    recordings.

    Args:
      headers: dict containing HTTP headers which should be included in all
          HTTP requests.
      recordings: The initial recordings to be used for responses. This list
          contains tuples in the form: (MockRequest, MockResponse)
      real_client: An HttpClient which will make a real HTTP request. The
          response will be converted into a MockResponse and stored in
          recordings.
    """
    self.recordings = recordings or []
    self.real_client = real_client
    self.headers = headers or {}

  def add_response(self, response, operation, url, data=None, headers=None):
    """Adds a request-response pair to the recordings list.

    After the recording is added, future matching requests will receive the
    response.

    Args:
      response: MockResponse
      operation: str
      url: str
      data: str, Currently the data is ignored when looking for matching
          requests.
      headers: dict of strings: Currently the headers are ignored when
          looking for matching requests.
    """
    request = MockRequest(operation, url, data=data, headers=headers)
    self.recordings.append((request, response))

  def request(self, operation, url, data=None, headers=None):
    """Returns a matching MockResponse from the recordings.

    If the real_client is set, the request will be passed along and the
    server's response will be added to the recordings and also returned.

    If there is no match, a NoRecordingFound error will be raised.
    """
    if self.real_client is None:
      if isinstance(url, (str, unicode)):
        url = atom.url.parse_url(url)
      for recording in self.recordings:
        # Matching considers only the operation and URL; data and headers
        # are ignored (see add_response).
        if recording[0].operation == operation and recording[0].url == url:
          return recording[1]
      # BUG FIX: the message previously read 'No recodings found'.
      raise NoRecordingFound('No recordings found for %s %s' % (
          operation, url))
    else:
      # There is a real HTTP client, so make the request, and record the
      # response.
      response = self.real_client.request(operation, url, data=data,
                                          headers=headers)
      # TODO: copy the headers
      stored_response = MockResponse(body=response, status=response.status,
                                     reason=response.reason)
      self.add_response(stored_response, operation, url, data=data,
                        headers=headers)
      return stored_response

View File

@ -0,0 +1,323 @@
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import StringIO
import pickle
import os.path
import tempfile
import atom.http_core
class Error(Exception):
  """Base exception for errors raised by this mock HTTP core module."""
  pass
class NoRecordingFound(Error):
  """Raised when no stored recording matches the incoming request."""
  pass
class MockHttpClient(object):
  """Replays recorded responses, or records live ones for later replay.

  Recordings can be persisted to (and loaded from) a pickle file in the
  system temp directory, which allows live test sessions to be cached.
  """
  debug = None
  real_client = None
  # True when the most recent request() call hit the network.
  last_request_was_live = False

  # The following members are used to construct the session cache temp file
  # name.
  # These are combined to form the file name
  # /tmp/cache_prefix.cache_case_name.cache_test_name
  cache_name_prefix = 'gdata_live_test'
  cache_case_name = ''
  cache_test_name = ''

  def __init__(self, recordings=None, real_client=None):
    self._recordings = recordings or []
    if real_client is not None:
      self.real_client = real_client

  def add_response(self, http_request, status, reason, headers=None,
                   body=None):
    """Stores a (request, response) pair for later replay."""
    response = MockHttpResponse(status, reason, headers, body)
    # TODO Scrub the request and the response.
    self._recordings.append((http_request._copy(), response))

  AddResponse = add_response

  def request(self, http_request):
    """Provide a recorded response, or record a response for replay.

    If the real_client is set, the request will be made using the
    real_client, and the response from the server will be recorded.
    If the real_client is None (the default), this method will examine
    the recordings and find the first which matches.
    """
    request = http_request._copy()
    _scrub_request(request)
    if self.real_client is None:
      self.last_request_was_live = False
      for recording in self._recordings:
        if _match_request(recording[0], request):
          return recording[1]
    else:
      # Pass along the debug settings to the real client.
      self.real_client.debug = self.debug
      # Make an actual request since we can use the real HTTP client.
      self.last_request_was_live = True
      response = self.real_client.request(http_request)
      scrubbed_response = _scrub_response(response)
      self.add_response(request, scrubbed_response.status,
                        scrubbed_response.reason,
                        dict(atom.http_core.get_headers(scrubbed_response)),
                        scrubbed_response.read())
      # Return the recording which we just added.
      return self._recordings[-1][1]
    # BUG FIX: the message previously read 'No recoding was found'.
    raise NoRecordingFound('No recording was found for request: %s %s' % (
        request.method, str(request.uri)))

  Request = request

  def _save_recordings(self, filename):
    """Pickles the recordings list into the temp directory."""
    recording_file = open(os.path.join(tempfile.gettempdir(), filename),
                          'wb')
    pickle.dump(self._recordings, recording_file)
    recording_file.close()

  def _load_recordings(self, filename):
    """Replaces the recordings list with one unpickled from the temp dir."""
    recording_file = open(os.path.join(tempfile.gettempdir(), filename),
                          'rb')
    self._recordings = pickle.load(recording_file)
    recording_file.close()

  def _delete_recordings(self, filename):
    """Removes the named recordings file from the temp dir if present."""
    full_path = os.path.join(tempfile.gettempdir(), filename)
    if os.path.exists(full_path):
      os.remove(full_path)

  def _load_or_use_client(self, filename, http_client):
    # Replay from the cache when it exists; otherwise go live and record.
    if os.path.exists(os.path.join(tempfile.gettempdir(), filename)):
      self._load_recordings(filename)
    else:
      self.real_client = http_client

  def use_cached_session(self, name=None, real_http_client=None):
    """Attempts to load recordings from a previous live request.

    If a temp file with the recordings exists, then it is used to fulfill
    requests. If the file does not exist, then a real client is used to
    actually make the desired HTTP requests. Requests and responses are
    recorded and will be written to the desired temprary cache file when
    close_session is called.

    Args:
      name: str (optional) The file name of session file to be used. The file
          is loaded from the temporary directory of this machine. If no name
          is passed in, a default name will be constructed using the
          cache_name_prefix, cache_case_name, and cache_test_name of this
          object.
      real_http_client: atom.http_core.HttpClient the real client to be used
          if the cached recordings are not found. If the default
          value is used, this will be an
          atom.http_core.HttpClient.
    """
    if real_http_client is None:
      real_http_client = atom.http_core.HttpClient()
    if name is None:
      self._recordings_cache_name = self.get_cache_file_name()
    else:
      self._recordings_cache_name = name
    self._load_or_use_client(self._recordings_cache_name, real_http_client)

  def close_session(self):
    """Saves recordings in the temporary file named in use_cached_session."""
    if self.real_client is not None:
      self._save_recordings(self._recordings_cache_name)

  def delete_session(self, name=None):
    """Removes recordings from a previous live request."""
    if name is None:
      self._delete_recordings(self._recordings_cache_name)
    else:
      self._delete_recordings(name)

  def get_cache_file_name(self):
    """Builds the default cache file name from the configured name parts."""
    return '%s.%s.%s' % (self.cache_name_prefix, self.cache_case_name,
                         self.cache_test_name)

  def _dump(self):
    """Provides debug information in a string."""
    output = 'MockHttpClient\n real_client: %s\n cache file name: %s\n' % (
        self.real_client, self.get_cache_file_name())
    output += ' recordings:\n'
    i = 0
    for recording in self._recordings:
      output += ' recording %i is for: %s %s\n' % (
          i, recording[0].method, str(recording[0].uri))
      i += 1
    return output
def _match_request(http_request, stored_request):
"""Determines whether a request is similar enough to a stored request
to cause the stored response to be returned."""
# Check to see if the host names match.
if (http_request.uri.host is not None
and http_request.uri.host != stored_request.uri.host):
return False
# Check the request path in the URL (/feeds/private/full/x)
elif http_request.uri.path != stored_request.uri.path:
return False
# Check the method used in the request (GET, POST, etc.)
elif http_request.method != stored_request.method:
return False
# If there is a gsession ID in either request, make sure that it is matched
# exactly.
elif ('gsessionid' in http_request.uri.query
or 'gsessionid' in stored_request.uri.query):
if 'gsessionid' not in stored_request.uri.query:
return False
elif 'gsessionid' not in http_request.uri.query:
return False
elif (http_request.uri.query['gsessionid']
!= stored_request.uri.query['gsessionid']):
return False
# Ignores differences in the query params (?start-index=5&max-results=20),
# the body of the request, the port number, HTTP headers, just to name a
# few.
return True
def _scrub_request(http_request):
""" Removes email address and password from a client login request.
Since the mock server saves the request and response in plantext, sensitive
information like the password should be removed before saving the
recordings. At the moment only requests sent to a ClientLogin url are
scrubbed.
"""
if (http_request and http_request.uri and http_request.uri.path and
http_request.uri.path.endswith('ClientLogin')):
# Remove the email and password from a ClientLogin request.
http_request._body_parts = []
http_request.add_form_inputs(
{'form_data': 'client login request has been scrubbed'})
else:
# We can remove the body of the post from the recorded request, since
# the request body is not used when finding a matching recording.
http_request._body_parts = []
return http_request
def _scrub_response(http_response):
  # Responses are currently stored unmodified; this is a hook where
  # sensitive response data could be scrubbed before recording.
  return http_response
class EchoHttpClient(object):
  """Sends the request data back in the response.

  Used to check the formatting of the request as it was sent. Always responds
  with a 200 OK, and some information from the HTTP request is returned in
  special Echo-X headers in the response. The following headers are added
  in the response:
  'Echo-Host': The host name and port number to which the HTTP connection is
               made. If no port was passed in, the header will contain
               host:None.
  'Echo-Uri': The path portion of the URL being requested. /example?x=1&y=2
  'Echo-Scheme': The beginning of the URL, usually 'http' or 'https'
  'Echo-Method': The HTTP method being used, 'GET', 'POST', 'PUT', etc.
  """

  def request(self, http_request):
    """Unpacks the HttpRequest and builds the echoing response."""
    return self._http_request(http_request.uri, http_request.method,
                              http_request.headers, http_request._body_parts)

  def _http_request(self, uri, method, headers=None, body_parts=None):
    # The response body buffer doubles as the echoed request body.
    body = StringIO.StringIO()
    response = atom.http_core.HttpResponse(status=200, reason='OK', body=body)
    if headers is None:
      response._headers = {}
    else:
      # Copy headers from the request to the response but convert values to
      # strings. Server response headers always come in as strings, so an int
      # should be converted to a corresponding string when echoing.
      for header, value in headers.iteritems():
        response._headers[header] = str(value)
    response._headers['Echo-Host'] = '%s:%s' % (uri.host, str(uri.port))
    response._headers['Echo-Uri'] = uri._get_relative_path()
    response._headers['Echo-Scheme'] = uri.scheme
    response._headers['Echo-Method'] = method
    # Replay the request body parts into the response body buffer.
    for part in body_parts:
      if isinstance(part, str):
        body.write(part)
      elif hasattr(part, 'read'):
        body.write(part.read())
    # Rewind so the caller's read() starts from the beginning.
    body.seek(0)
    return response
class SettableHttpClient(object):
  """An HTTP Client which responds with the data given in set_response."""

  def __init__(self, status, reason, body, headers):
    """Configures the response for the server.

    See set_response for details on the arguments to the constructor.
    """
    self.set_response(status, reason, body, headers)
    # Stores the most recent request passed to request(), for inspection
    # by tests.
    self.last_request = None

  def set_response(self, status, reason, body, headers):
    """Determines the response which will be sent for each request.

    Args:
      status: An int for the HTTP status code, example: 200, 404, etc.
      reason: String for the HTTP reason, example: OK, NOT FOUND, etc.
      body: The body of the HTTP response as a string or a file-like
          object (something with a read method).
      headers: dict of strings containing the HTTP headers in the response.
    """
    self.response = atom.http_core.HttpResponse(status=status, reason=reason,
                                                body=body)
    # Copy so later mutation of the caller's dict does not alter the
    # canned response.
    self.response._headers = headers.copy()

  def request(self, http_request):
    """Records the request and returns the canned response."""
    self.last_request = http_request
    return self.response
class MockHttpResponse(atom.http_core.HttpResponse):
  """A recorded HTTP response whose body can be read any number of times."""

  def __init__(self, status=None, reason=None, headers=None, body=None):
    self._headers = headers or {}
    if status is not None:
      self.status = status
    if reason is not None:
      self.reason = reason
    if body is None:
      return
    # Instead of using a file-like object for the body, store as a string
    # so that reads can be repeated.
    if hasattr(body, 'read'):
      self._body = body.read()
    else:
      self._body = body

  def read(self):
    return self._body

243
python/atom/mock_service.py Normal file
View File

@ -0,0 +1,243 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MockService provides CRUD ops. for mocking calls to AtomPub services.
MockService: Exposes the publicly used methods of AtomService to provide
a mock interface which can be used in unit tests.
"""
import atom.service
import pickle
__author__ = 'api.jscudder (Jeffrey Scudder)'
# Recordings contains pairings of HTTP MockRequest objects with MockHttpResponse objects.
recordings = []
# If set, the mock service HttpRequest are actually made through this object.
real_request_handler = None
def ConcealValueWithSha(source):
  """Returns the SHA-1 hex digest of source with its last 5 chars dropped.

  Used to conceal secrets (e.g. Authorization header values) before
  recordings are pickled to disk.
  # NOTE(review): the trailing 5 characters are excluded from the hash
  # by the original implementation — confirm why before changing.
  """
  # The 'sha' module is deprecated since Python 2.5 and removed in 3.x;
  # hashlib.sha1 produces the identical digest, so prefer it and fall
  # back to 'sha' only on very old interpreters.
  try:
    import hashlib
    sha_new = hashlib.sha1
  except ImportError:
    import sha
    sha_new = sha.new
  return sha_new(source[:-5]).hexdigest()
def DumpRecordings(conceal_func=ConcealValueWithSha):
  """Pickles the module-level recordings list into a string.

  Args:
    conceal_func: callable used to scrub the secret parts of each recorded
        request before pickling; pass a falsy value to skip concealment.
  """
  if conceal_func:
    # Scrub secrets in place before the recordings are serialized.
    for recording_pair in recordings:
      recording_pair[0].ConcealSecrets(conceal_func)
  return pickle.dumps(recordings)
def LoadRecordings(recordings_file_or_string):
  """Replaces the module-level recordings list from a pickle.

  Accepts either a pickled string or a file-like object containing the
  pickled recordings.
  """
  if isinstance(recordings_file_or_string, str):
    atom.mock_service.recordings = pickle.loads(recordings_file_or_string)
  elif hasattr(recordings_file_or_string, 'read'):
    atom.mock_service.recordings = pickle.loads(
        recordings_file_or_string.read())
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
  """Simulates an HTTP call to the server, makes an actual HTTP request if
  real_request_handler is set.

  This function operates in two different modes depending on if
  real_request_handler is set or not. If real_request_handler is not set,
  HttpRequest will look in this module's recordings list to find a response
  which matches the parameters in the function call. If real_request_handler
  is set, this function will call real_request_handler.HttpRequest, add the
  response to the recordings list, and respond with the actual response.

  Args:
    service: atom.AtomService object which contains some of the parameters
        needed to make the request. The following members are used to
        construct the HTTP call: server (str), additional_headers (dict),
        port (int), and ssl (bool).
    operation: str The HTTP operation to be performed. This is usually one of
        'GET', 'POST', 'PUT', or 'DELETE'
    data: ElementTree, filestream, list of parts, or other object which can be
        converted to a string.
        Should be set to None when performing a GET or PUT.
        If data is a file-like object which can be read, this method will read
        a chunk of 100K bytes at a time and send them.
        If the data is a list of parts to be sent, each part will be evaluated
        and sent.
    uri: The beginning of the URL to which the request should be sent.
        Examples: '/', '/base/feeds/snippets',
        '/m8/feeds/contacts/default/base'
    extra_headers: dict of strings. HTTP headers which should be sent
        in the request. These headers are in addition to those stored in
        service.additional_headers.
    url_params: dict of strings. Key value pairs to be added to the URL as
        URL parameters. For example {'foo':'bar', 'test':'param'} will
        become ?foo=bar&test=param.
    escape_params: bool default True. If true, the keys and values in
        url_params will be URL escaped when the form is constructed
        (Special characters converted to %XX form.)
    content_type: str The MIME type for the data being sent. Defaults to
        'application/atom+xml', this is only used if data is set.
  """
  full_uri = atom.service.BuildUri(uri, url_params, escape_params)
  (server, port, ssl, uri) = atom.service.ProcessUrl(service, uri)
  # The MockRequest captures everything needed to match this call against
  # stored recordings later.
  current_request = MockRequest(operation, full_uri, host=server, ssl=ssl,
      data=data, extra_headers=extra_headers, url_params=url_params,
      escape_params=escape_params, content_type=content_type)
  # If the request handler is set, we should actually make the request using
  # the request handler and record the response to replay later.
  if real_request_handler:
    response = real_request_handler.HttpRequest(service, operation, data, uri,
        extra_headers=extra_headers, url_params=url_params,
        escape_params=escape_params, content_type=content_type)
    # TODO: need to copy the HTTP headers from the real response into the
    # recorded_response.
    recorded_response = MockHttpResponse(body=response.read(),
        status=response.status, reason=response.reason)
    # Insert a tuple which maps the request to the response object returned
    # when making an HTTP call using the real_request_handler.
    recordings.append((current_request, recorded_response))
    return recorded_response
  else:
    # Look through available recordings to see if one matches the current
    # request.
    for request_response_pair in recordings:
      if request_response_pair[0].IsMatch(current_request):
        return request_response_pair[1]
    # No recording matched; callers receive None rather than an error.
    return None
class MockRequest(object):
"""Represents a request made to an AtomPub server.
These objects are used to determine if a client request matches a recorded
HTTP request to determine what the mock server's response will be.
"""
def __init__(self, operation, uri, host=None, ssl=False, port=None,
data=None, extra_headers=None, url_params=None, escape_params=True,
content_type='application/atom+xml'):
"""Constructor for a MockRequest
Args:
operation: str One of 'GET', 'POST', 'PUT', or 'DELETE' this is the
HTTP operation requested on the resource.
uri: str The URL describing the resource to be modified or feed to be
retrieved. This should include the protocol (http/https) and the host
(aka domain). For example, these are some valud full_uris:
'http://example.com', 'https://www.google.com/accounts/ClientLogin'
host: str (optional) The server name which will be placed at the
beginning of the URL if the uri parameter does not begin with 'http'.
Examples include 'example.com', 'www.google.com', 'www.blogger.com'.
ssl: boolean (optional) If true, the request URL will begin with https
instead of http.
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string. (optional)
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, the constructor
will read the entire file into memory. If the data is a list of
parts to be sent, each part will be evaluated and stored.
extra_headers: dict (optional) HTTP headers included in the request.
url_params: dict (optional) Key value pairs which should be added to
the URL as URL parameters in the request. For example uri='/',
url_parameters={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'.
escape_params: boolean (optional) Perform URL escaping on the keys and
values specified in url_params. Defaults to True.
content_type: str (optional) Provides the MIME type of the data being
sent.
"""
self.operation = operation
self.uri = _ConstructFullUrlBase(uri, host=host, ssl=ssl)
self.data = data
self.extra_headers = extra_headers
self.url_params = url_params or {}
self.escape_params = escape_params
self.content_type = content_type
def ConcealSecrets(self, conceal_func):
  """Conceal secret data in this request.

  Replaces the value of the Authorization header with the result of
  conceal_func so that a recorded request does not retain credentials.

  Args:
    conceal_func: callable Takes the current Authorization header value
        and returns the concealed value to store in its place.
  """
  # The constructor stores extra_headers exactly as passed, so it may be
  # None; guard against that before probing for the header. Also use the
  # 'in' operator instead of the deprecated dict.has_key.
  if self.extra_headers and 'Authorization' in self.extra_headers:
    self.extra_headers['Authorization'] = conceal_func(
        self.extra_headers['Authorization'])
def IsMatch(self, other_request):
  """Decide whether other_request is equivalent to this recorded request.

  Used to determine if a recording matches an incoming request so that the
  recorded response can be replayed to the client.

  Args:
    other_request: MockRequest The request to compare against this one.

  Returns:
    True when the two requests match, False otherwise.
  """
  # Matching is deliberately loose: only the HTTP verb and the URL are
  # compared. More accurate matching logic will likely be required.
  same_operation = self.operation == other_request.operation
  same_uri = self.uri == other_request.uri
  return same_operation and same_uri
def _ConstructFullUrlBase(uri, host=None, ssl=False):
  """Normalize URL components into the form http(s)://full.host.string/path.

  Produces a roughly canonical URL so that a URL beginning with
  'http://example.com/' compares equal to a uri of '/' with host set to
  'example.com'. When uri already contains 'http', the host and ssl
  parameters are ignored.

  Args:
    uri: str The path component of the URL, e.g. '/'.
    host: str (optional) Host name to prepend, e.g. 'example.com'.
    ssl: boolean (optional) If True, a constructed URL begins with https.

  Returns:
    A string of the form http(s)://example.com/uri/string/contents.
  """
  if uri.startswith('http'):
    return uri
  scheme = 'https' if ssl else 'http'
  return '%s://%s%s' % (scheme, host, uri)
class MockHttpResponse(object):
  """Stand-in for an httplib.HTTPResponse returned by MockService methods."""

  def __init__(self, body=None, status=None, reason=None, headers=None):
    """Build a canned HTTP response.

    Args:
      body: str (optional) The HTTP body of the server's response.
      status: int (optional) HTTP status code.
      reason: str (optional) HTTP reason phrase.
      headers: dict (optional) Response headers; defaults to empty.
    """
    self.body = body
    self.status = status
    self.reason = reason
    self.headers = headers if headers else {}

  def read(self):
    # Mirrors httplib.HTTPResponse.read: hands back the canned body.
    return self.body

  def getheader(self, header_name):
    # Looks the header up in the stored dict; raises KeyError when absent.
    return self.headers[header_name]

740
python/atom/service.py Normal file
View File

@ -0,0 +1,740 @@
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol.
AtomService: Encapsulates the ability to perform insert, update and delete
operations with the Atom Publishing Protocol on which GData is
based. An instance can perform query, insertion, deletion, and
update.
HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request
to the specified end point. An AtomService object or a subclass can be
used to specify information about the request.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
import atom.http
import atom.token_store
import os
import httplib
import urllib
import re
import base64
import socket
import warnings
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
class AtomService(object):
  """Performs Atom Publishing Protocol CRUD operations.

  The AtomService contains methods to perform HTTP CRUD operations.
  """

  # Default values for members.
  port = 80
  ssl = False
  # Set the current_token to force the AtomService to use this token
  # instead of searching for an appropriate token in the token_store.
  current_token = None
  # When True, use_basic_auth stores newly created tokens in token_store.
  auto_store_tokens = True
  # When True, use_basic_auth also assigns the new token to current_token.
  auto_set_current_token = True

  def _get_override_token(self):
    return self.current_token

  def _set_override_token(self, token):
    self.current_token = token

  # override_token is an alias for current_token; request() consults it
  # before falling back to the token_store.
  override_token = property(_get_override_token, _set_override_token)

  #@atom.v1_deprecated('Please use atom.client.AtomPubClient instead.')
  def __init__(self, server=None, additional_headers=None,
               application_name='', http_client=None, token_store=None):
    """Creates a new AtomService client.

    Args:
      server: string (optional) The start of a URL for the server
          to which all operations should be directed. Example:
          'www.google.com'
      additional_headers: dict (optional) Any additional HTTP headers which
          should be included with CRUD operations.
      application_name: str (optional) Substituted into the User-Agent
          header sent with every request.
      http_client: An object responsible for making HTTP requests using a
          request method. If none is provided, a new instance of
          atom.http.ProxiedHttpClient will be used.
      token_store: Keeps a collection of authorization tokens which can be
          applied to requests for specific URLs. Critical methods are
          find_token based on a URL (atom.url.Url or a string), add_token,
          and remove_token.
    """
    self.http_client = http_client or atom.http.ProxiedHttpClient()
    self.token_store = token_store or atom.token_store.TokenStore()
    self.server = server
    self.additional_headers = additional_headers or {}
    self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
        application_name,)
    # If debug is True, the HTTPConnection will display debug information.
    self._set_debug(False)

  # Apply the deprecation decorator by hand (the @-syntax version is kept
  # commented out above for reference).
  __init__ = atom.v1_deprecated(
      'Please use atom.client.AtomPubClient instead.')(
      __init__)

  def _get_debug(self):
    return self.http_client.debug

  def _set_debug(self, value):
    self.http_client.debug = value

  debug = property(_get_debug, _set_debug,
                   doc='If True, HTTP debug information is printed.')

  def use_basic_auth(self, username, password, scopes=None):
    """Creates and installs a Basic Auth token for the given credentials.

    Args:
      username: str
      password: str
      scopes: list (optional) URL scopes the token should apply to.

    Returns:
      True/False from token_store.add_token when tokens are auto-stored,
      True when only current_token was set, False when username or
      password was None.
    """
    if username is not None and password is not None:
      if scopes is None:
        scopes = [atom.token_store.SCOPE_ALL]
      base_64_string = base64.encodestring('%s:%s' % (username, password))
      # NOTE(review): the 'scopes' value computed above is never used; the
      # token is always created with [SCOPE_ALL] — looks like a bug, verify.
      token = BasicAuthToken('Basic %s' % base_64_string.strip(),
                             scopes=[atom.token_store.SCOPE_ALL])
      if self.auto_set_current_token:
        self.current_token = token
      if self.auto_store_tokens:
        return self.token_store.add_token(token)
      return True
    return False

  def UseBasicAuth(self, username, password, for_proxy=False):
    """Sets an Authentication: Basic HTTP header containing plaintext.

    Deprecated, use use_basic_auth instead.

    The username and password are base64 encoded and added to an HTTP header
    which will be included in each request. Note that your username and
    password are sent in plaintext.

    Args:
      username: str
      password: str
      for_proxy: bool (accepted for compatibility; not forwarded).
    """
    self.use_basic_auth(username, password)

  #@atom.v1_deprecated('Please use atom.client.AtomPubClient for requests.')
  def request(self, operation, url, data=None, headers=None,
              url_params=None):
    """Performs an HTTP operation using an appropriate auth token.

    Args:
      operation: str 'GET', 'POST', 'PUT' or 'DELETE'.
      url: str or atom.url.Url target; relative URLs are completed using
          self.server and self.ssl.
      data: request body (optional).
      headers: dict (optional) merged over additional_headers.
      url_params: dict (optional) added to the URL's query parameters.
    """
    if isinstance(url, (str, unicode)):
      if url.startswith('http:') and self.ssl:
        # Force all requests to be https if self.ssl is True.
        url = atom.url.parse_url('https:' + url[5:])
      elif not url.startswith('http') and self.ssl:
        url = atom.url.parse_url('https://%s%s' % (self.server, url))
      elif not url.startswith('http'):
        url = atom.url.parse_url('http://%s%s' % (self.server, url))
      else:
        url = atom.url.parse_url(url)

    if url_params:
      for name, value in url_params.iteritems():
        url.params[name] = value

    all_headers = self.additional_headers.copy()
    if headers:
      all_headers.update(headers)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      content_length = CalculateDataLength(data)
      if content_length:
        all_headers['Content-Length'] = str(content_length)

    # Find an Authorization token for this URL if one is available.
    if self.override_token:
      auth_token = self.override_token
    else:
      auth_token = self.token_store.find_token(url)
    return auth_token.perform_request(self.http_client, operation, url,
        data=data, headers=all_headers)

  # Apply the deprecation decorator by hand (see __init__ above).
  request = atom.v1_deprecated(
      'Please use atom.client.AtomPubClient for requests.')(
      request)

  # CRUD operations

  def Get(self, uri, extra_headers=None, url_params=None, escape_params=True):
    """Query the APP server with the given URI.

    The uri is the portion of the URI after the server value
    (server example: 'www.google.com').

    Example use:
      To perform a query against Google Base, set the server to
      'base.google.com' and set the uri to '/base/feeds/...', where ... is
      your query. For example, to find snippets for all digital cameras uri
      should be set to: '/base/feeds/snippets?bq=digital+camera'

    Args:
      uri: string The query in the form of a URI. Example:
          '/base/feeds/snippets?bq=digital+camera'.
      extra_headers: dict (optional) Extra HTTP headers to be included
          in the GET request. These headers are in addition to
          those stored in the client's additional_headers property.
          The client automatically sets the Content-Type and
          Authorization headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the query. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) If false, the calling code has
          already ensured that the query will form a valid URL (all
          reserved characters have been escaped).
          NOTE(review): accepted for API compatibility but not forwarded
          to request() — verify whether escaping is handled elsewhere.

    Returns:
      httplib.HTTPResponse The server's response to the GET request.
    """
    return self.request('GET', uri, data=None, headers=extra_headers,
                        url_params=url_params)

  def Post(self, data, uri, extra_headers=None, url_params=None,
           escape_params=True, content_type='application/atom+xml'):
    """Insert data into an APP server at the given URI.

    Args:
      data: string, ElementTree._Element, or something with a __str__ method
          The XML to be sent to the uri.
      uri: string The location (feed) to which the data should be inserted.
          Example: '/base/feeds/items'.
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type,
          Authorization, and Content-Length headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI, as for Get.
      escape_params: boolean (optional) See the note on Get.
      content_type: str (optional) MIME type set as the Content-Type header.

    Returns:
      httplib.HTTPResponse Server's response to the POST request.
    """
    if extra_headers is None:
      extra_headers = {}
    if content_type:
      extra_headers['Content-Type'] = content_type
    return self.request('POST', uri, data=data, headers=extra_headers,
                        url_params=url_params)

  def Put(self, data, uri, extra_headers=None, url_params=None,
          escape_params=True, content_type='application/atom+xml'):
    """Updates an entry at the given URI.

    Args:
      data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
          XML containing the updated data.
      uri: string A URI indicating entry to which the update will be applied.
          Example: '/base/feeds/items/ITEM-ID'
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type,
          Authorization, and Content-Length headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI, as for Get.
      escape_params: boolean (optional) See the note on Get.
      content_type: str (optional) MIME type set as the Content-Type header.

    Returns:
      httplib.HTTPResponse Server's response to the PUT request.
    """
    if extra_headers is None:
      extra_headers = {}
    if content_type:
      extra_headers['Content-Type'] = content_type
    return self.request('PUT', uri, data=data, headers=extra_headers,
                        url_params=url_params)

  def Delete(self, uri, extra_headers=None, url_params=None,
             escape_params=True):
    """Deletes the entry at the given URI.

    Args:
      uri: string The URI of the entry to be deleted. Example:
          '/base/feeds/items/ITEM-ID'
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type and
          Authorization headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI, as for Get.
      escape_params: boolean (optional) See the note on Get.

    Returns:
      httplib.HTTPResponse Server's response to the DELETE request.
    """
    return self.request('DELETE', uri, data=None, headers=extra_headers,
                        url_params=url_params)
class BasicAuthToken(atom.http_interface.GenericToken):
  """Adds a Basic Auth Authorization header to requests within its scopes."""

  def __init__(self, auth_header, scopes=None):
    """Creates a token used to add Basic Auth headers to HTTP requests.

    Args:
      auth_header: str The value for the Authorization header.
      scopes: list of str or atom.url.Url specifying the beginnings of URLs
          for which this token can be used. For example, if scopes contains
          'http://example.com/foo', then this token can be used for a request
          to 'http://example.com/foo/bar' but it cannot be used for a request
          to 'http://example.com/baz'
    """
    self.auth_header = auth_header
    self.scopes = scopes or []

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """Sets the Authorization header to the basic auth string."""
    if headers is None:
      headers = {'Authorization':self.auth_header}
    else:
      # Overwrites any Authorization value already present in headers.
      headers['Authorization'] = self.auth_header
    return http_client.request(operation, url, data=data, headers=headers)

  def __str__(self):
    return self.auth_header

  def valid_for_scope(self, url):
    """Tells the caller if the token authorizes access to the desired URL.
    """
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    for scope in self.scopes:
      # SCOPE_ALL authorizes every URL.
      if scope == atom.token_store.SCOPE_ALL:
        return True
      if isinstance(scope, (str, unicode)):
        scope = atom.url.parse_url(scope)
      if scope == url:
        return True
      # Check the host and the path, but ignore the port and protocol.
      elif scope.host == url.host and not scope.path:
        # A scope with no path authorizes the whole host.
        return True
      elif scope.host == url.host and scope.path and not url.path:
        # Scope requires a path prefix but the URL has no path: this scope
        # does not match, keep checking the remaining scopes.
        continue
      elif scope.host == url.host and url.path.startswith(scope.path):
        # URL path falls under the scope's path prefix.
        return True
    return False
def PrepareConnection(service, full_uri):
  """Opens a connection to the server based on the full URI.

  This method is deprecated, instead use atom.http.HttpClient.request.

  Examines the target URI and the proxy settings, which are set as
  environment variables, to open a connection with the server. This
  connection is used to make an HTTP request.

  Args:
    service: atom.AtomService or a subclass. It must have a server string
        which represents the server host to which the request should be made.
        It may also have a dictionary of additional_headers to send in the
        HTTP request.
    full_uri: str Which is the target relative (lacks protocol and host) or
        absolute URL to be opened. Example:
        'https://www.google.com/accounts/ClientLogin' or
        'base/feeds/snippets' where the server is set to www.google.com.

  Returns:
    A tuple containing the httplib.HTTPConnection and the full_uri for the
    request.

  Raises:
    Exception: when an https proxy CONNECT does not answer with status 200.
  """
  deprecation('calling deprecated function PrepareConnection')
  (server, port, ssl, partial_uri) = ProcessUrl(service, full_uri)
  if ssl:
    # Destination is https.
    proxy = os.environ.get('https_proxy')
    if proxy:
      (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True)
      # Proxy credentials may be supplied under either spelling.
      proxy_username = os.environ.get('proxy-username')
      if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
      proxy_password = os.environ.get('proxy-password')
      if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
      if proxy_username:
        user_auth = base64.encodestring('%s:%s' % (proxy_username,
                                                   proxy_password))
        proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % (
            user_auth.strip()))
      else:
        proxy_authorization = ''
      # Hand-rolled CONNECT tunnel through the proxy.
      proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port)
      user_agent = 'User-Agent: %s\r\n' % (
          service.additional_headers['User-Agent'])
      proxy_pieces = (proxy_connect + proxy_authorization + user_agent
                      + '\r\n')
      # Now connect, very simple recv and error checking.
      p_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      p_sock.connect((p_server, p_port))
      p_sock.sendall(proxy_pieces)
      response = ''
      # Wait for the full response.
      while response.find("\r\n\r\n") == -1:
        response += p_sock.recv(8192)
      p_status = response.split()[1]
      if p_status != str(200):
        # BUG FIX: the original used "raise 'Error status=',str(p_status)",
        # a string exception, which is itself a TypeError on Python >= 2.6.
        # Raise a real exception carrying the proxy status instead.
        raise Exception('Error status=%s' % str(p_status))
      # Trivial setup for ssl socket.
      ssl = socket.ssl(p_sock, None, None)
      fake_sock = httplib.FakeSocket(p_sock, ssl)
      # Initialize httplib and replace with the proxy socket.
      connection = httplib.HTTPConnection(server)
      connection.sock = fake_sock
      full_uri = partial_uri
    else:
      connection = httplib.HTTPSConnection(server, port)
      full_uri = partial_uri
  else:
    # Destination is http.
    proxy = os.environ.get('http_proxy')
    if proxy:
      (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy,
                                                    True)
      proxy_username = os.environ.get('proxy-username')
      if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
      proxy_password = os.environ.get('proxy-password')
      if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
      if proxy_username:
        UseBasicAuth(service, proxy_username, proxy_password, True)
      connection = httplib.HTTPConnection(p_server, p_port)
      # When going through a proxy, the request line must carry the
      # absolute URL.
      if not full_uri.startswith("http://"):
        if full_uri.startswith("/"):
          full_uri = "http://%s%s" % (service.server, full_uri)
        else:
          full_uri = "http://%s/%s" % (service.server, full_uri)
    else:
      connection = httplib.HTTPConnection(server, port)
      full_uri = partial_uri
  return (connection, full_uri)
def UseBasicAuth(service, username, password, for_proxy=False):
  """Sets an Authentication: Basic HTTP header containing plaintext.

  Deprecated, use AtomService.use_basic_auth instead.

  The username and password are base64 encoded and added to an HTTP header
  which will be included in each request. Note that your username and
  password are sent in plaintext. The auth header is added to the
  additional_headers dictionary in the service object.

  Args:
    service: atom.AtomService or a subclass which has an
        additional_headers dict as a member.
    username: str
    password: str
    for_proxy: bool When True the Proxy-Authorization header is set
        instead of Authorization.
  """
  deprecation('calling deprecated function UseBasicAuth')
  credentials = base64.encodestring('%s:%s' % (username, password)).strip()
  if for_proxy:
    header_name = 'Proxy-Authorization'
  else:
    header_name = 'Authorization'
  service.additional_headers[header_name] = 'Basic %s' % (credentials,)
def ProcessUrl(service, url, for_proxy=False):
  """Split a URL into (server, port, ssl, request_uri).

  If the URL does not begin with https?, defaults are taken from the
  service object. This method is deprecated, use atom.url.parse_url
  instead.
  """
  if not isinstance(url, atom.url.Url):
    url = atom.url.parse_url(url)

  # Defaults, overridden below from the URL or the service object.
  server = url.host
  ssl = False
  port = 80

  if not server:
    # Fall back to the service's host; 'service' may itself be a host
    # string instead of a service object.
    if hasattr(service, 'server'):
      server = service.server
    else:
      server = service

  if not url.protocol and hasattr(service, 'ssl'):
    # No scheme in the URL: inherit ssl/port settings from the service.
    ssl = service.ssl
    if hasattr(service, 'port'):
      port = service.port
  else:
    if url.protocol == 'https':
      ssl = True
    elif url.protocol == 'http':
      ssl = False
    if url.port:
      port = int(url.port)
    elif port == 80 and ssl:
      # No explicit port on an https URL: use the https default.
      port = 443

  return (server, port, ssl, url.get_request_uri())
def DictionaryToParamList(url_parameters, escape_params=True):
  """Convert a dictionary of URL arguments into a URL parameter string.

  This function is deprecated, use atom.url.Url instead.

  Args:
    url_parameters: dict The key-value pairs which will be converted
        into URL parameters. For example,
        {'dry-run': 'true', 'foo': 'bar'}
        will become ['dry-run=true', 'foo=bar'].
    escape_params: boolean (optional) When True keys and values are passed
        through urllib.quote_plus; when False they are used verbatim.

  Returns:
    A list which contains a string for each key-value pair. The strings are
    ready to be incorporated into a URL by using '&'.join([] + parameter_list)
  """
  # Choose which function to use when modifying the query and parameters.
  # Use quote_plus when escape_params is true.
  transform_op = [str, urllib.quote_plus][bool(escape_params)]
  # Emit each pair already joined as 'PARAMETER=VALUE'.
  return ['%s=%s' % (transform_op(param), transform_op(value))
          for param, value in (url_parameters or {}).items()]
def BuildUri(uri, url_params=None, escape_params=True):
  """Converts a uri string and a collection of parameters into a URI.

  This function is deprecated, use atom.url.Url instead.

  Args:
    uri: string The start of the desired URI. This string can already
        contain URL parameters. Examples: '/base/feeds/snippets',
        '/base/feeds/snippets?bq=digital+camera'
    url_params: dict (optional) Additional URL parameters to be included
        in the query. These are translated into query arguments
        in the form '&dict_key=value&...'.
        Example: {'max-results': '250'} becomes &max-results=250
    escape_params: boolean (optional) If false, the calling code has already
        ensured that the query will form a valid URL (all reserved
        characters have been escaped). If true, this method will escape
        the query and any URL parameters provided.

  Returns:
    string The URI consisting of the escaped URL parameters appended to the
    initial uri string.
  """
  # Prepare URL parameters for inclusion into the GET request.
  parameter_list = DictionaryToParamList(url_params, escape_params)
  if not parameter_list:
    return uri
  joined_params = '&'.join(parameter_list)
  if '?' in uri:
    # The uri already carries parameters; chain ours on with '&'.
    return '&'.join([uri, joined_params])
  # No '?' yet: introduce the query string.
  return '%s?%s' % (uri, joined_params)
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
  """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.

  This method is deprecated, use atom.http.HttpClient.request instead.

  Usage example, perform an HTTP GET on http://www.google.com/:
    import atom.service
    client = atom.service.AtomService()
    http_response = client.Get('http://www.google.com/')
  or you could set the client.server to 'www.google.com' and use the
  following:
    client.server = 'www.google.com'
    http_response = client.Get('/')

  Args:
    service: atom.AtomService object which contains some of the parameters
        needed to make the request. The following members are used to
        construct the HTTP call: server (str), additional_headers (dict),
        port (int), and ssl (bool).
    operation: str The HTTP operation to be performed. This is usually one
        of 'GET', 'POST', 'PUT', or 'DELETE'.
    data: ElementTree, filestream, list of parts, or other object which can
        be converted to a string. Should be set to None when performing a
        GET or PUT. If data is a file-like object which can be read, this
        method will read a chunk of 100K bytes at a time and send them. If
        the data is a list of parts to be sent, each part will be evaluated
        and sent.
    uri: The beginning of the URL to which the request should be sent.
        Examples: '/', '/base/feeds/snippets',
        '/m8/feeds/contacts/default/base'
    extra_headers: dict of strings. HTTP headers which should be sent in
        the request. These headers are in addition to those stored in
        service.additional_headers.
    url_params: dict of strings. Key value pairs to be added to the URL as
        URL parameters. For example {'foo':'bar', 'test':'param'} will
        become ?foo=bar&test=param.
    escape_params: bool default True. If true, the keys and values in
        url_params will be URL escaped when the form is constructed
        (Special characters converted to %XX form.)
    content_type: str The MIME type for the data being sent. Defaults to
        'application/atom+xml', this is only used if data is set.
  """
  deprecation('call to deprecated function HttpRequest')
  full_uri = BuildUri(uri, url_params, escape_params)
  (connection, full_uri) = PrepareConnection(service, full_uri)

  if extra_headers is None:
    extra_headers = {}

  # Turn on debug mode if the debug member is set.
  if service.debug:
    connection.debuglevel = 1

  connection.putrequest(operation, full_uri)

  # If the list of headers does not include a Content-Length, attempt to
  # calculate it based on the data object. (Uses 'in' instead of the
  # deprecated dict.has_key.)
  if (data and 'Content-Length' not in service.additional_headers and
      'Content-Length' not in extra_headers):
    content_length = CalculateDataLength(data)
    if content_length:
      extra_headers['Content-Length'] = str(content_length)

  if content_type:
    extra_headers['Content-Type'] = content_type

  # Send the HTTP headers.
  if isinstance(service.additional_headers, dict):
    for header in service.additional_headers:
      connection.putheader(header, service.additional_headers[header])
  if isinstance(extra_headers, dict):
    for header in extra_headers:
      connection.putheader(header, extra_headers[header])
  connection.endheaders()

  # If there is data, send it in the request.
  if data:
    if isinstance(data, list):
      for data_part in data:
        __SendDataPart(data_part, connection)
    else:
      __SendDataPart(data, connection)

  # Return the HTTP Response from the server.
  return connection.getresponse()
def __SendDataPart(data, connection):
  """Sends one piece of a request body over the connection.

  This method is deprecated, use atom.http._send_data_part.

  Args:
    data: str, ElementTree element, file-like object, or anything with a
        __str__ method; the payload (or payload part) to transmit.
    connection: httplib.HTTPConnection (or compatible) to send over.
  """
  # BUG FIX: the original called deprecated(...), which is not defined in
  # this module; the helper defined below is deprecation(...), so calling
  # this function always raised NameError.
  deprecation('call to deprecated function __SendDataPart')
  if isinstance(data, str):
    #TODO add handling for unicode.
    connection.send(data)
    return
  elif ElementTree.iselement(data):
    connection.send(ElementTree.tostring(data))
    return
  # Check to see if data is a file-like object that has a read method.
  elif hasattr(data, 'read'):
    # Read the file and send it a chunk at a time.
    while 1:
      binarydata = data.read(100000)
      if binarydata == '': break
      connection.send(binarydata)
    return
  else:
    # The data object was not a file.
    # Try to convert to a string and send the data.
    connection.send(str(data))
    return
def CalculateDataLength(data):
  """Attempts to determine the length of the data to send.

  This method will respond with a length only if the data is a string or
  an ElementTree element.

  Args:
    data: object If this is not a string or ElementTree element this
        function will return None.

  Returns:
    int length in characters, or None when the size should not be guessed
    (lists of parts and file-like objects are streamed instead).
  """
  if isinstance(data, str):
    return len(data)
  if isinstance(data, list):
    # A list of parts is sent piecewise; no single length applies.
    return None
  if ElementTree.iselement(data):
    return len(ElementTree.tostring(data))
  if hasattr(data, 'read'):
    # If this is a file-like object, don't try to guess the length.
    return None
  # Fall back to the length of the object's string form.
  return len(str(data))
def deprecation(message):
  """Emit a DeprecationWarning carrying the given message."""
  # stacklevel=2 attributes the warning to whoever called the deprecated
  # API, rather than to this helper.
  warnings.warn(message, category=DeprecationWarning, stacklevel=2)

117
python/atom/token_store.py Normal file
View File

@ -0,0 +1,117 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a TokenStore class which is designed to manage
auth tokens required for different services.
Each token is valid for a set of scopes which is the start of a URL. An HTTP
client will use a token store to find a valid Authorization header to send
in requests to the specified URL. If the HTTP client determines that a token
has expired or been revoked, it can remove the token from the store so that
it will not be used in future requests.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
SCOPE_ALL = 'http'
class TokenStore(object):
  """Manages Authorization tokens which will be sent in HTTP headers."""

  def __init__(self, scoped_tokens=None):
    # Maps str(scope) -> token object (see add_token).
    self._tokens = scoped_tokens or {}

  def add_token(self, token):
    """Adds a new token to the store (replaces tokens with the same scope).

    Args:
      token: A subclass of http_interface.GenericToken. The token object is
          responsible for adding the Authorization header to the HTTP
          request. The scopes defined in the token are used to determine if
          the token is valid for a requested scope when find_token is
          called.

    Returns:
      True if the token was added, False if the token was not added because
      no scopes were provided.
    """
    if not hasattr(token, 'scopes') or not token.scopes:
      return False
    # One entry per scope; the scope's string form is the dict key.
    for scope in token.scopes:
      self._tokens[str(scope)] = token
    return True

  def find_token(self, url):
    """Selects an Authorization header token which can be used for the URL.

    Args:
      url: str or atom.url.Url or a list containing the same.
          The URL which is going to be requested. All tokens are examined
          to see if any scope matches the beginning of the URL. The first
          match found is returned.

    Returns:
      The token object which should execute the HTTP request. If there was
      no token for the url (the url did not begin with any of the token
      scopes available), then the atom.http_interface.GenericToken will be
      returned because the GenericToken calls through to the http client
      without adding an Authorization header.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    # NOTE(review): add_token stores keys as str(scope), but this lookup
    # probes the dict with a Url object, so this direct-hit branch likely
    # never matches — verify intended behavior.
    if url in self._tokens:
      token = self._tokens[url]
      if token.valid_for_scope(url):
        return token
      else:
        # The stored token no longer covers this URL; drop it.
        del self._tokens[url]
    # Fall back to a linear scan over all stored tokens.
    for scope, token in self._tokens.iteritems():
      if token.valid_for_scope(url):
        return token
    return atom.http_interface.GenericToken()

  def remove_token(self, token):
    """Removes the token from the token_store.

    This method is used when a token is determined to be invalid. If the
    token was found by find_token, but resulted in a 401 or 403 error
    stating that the token was invalid, then the token should be removed to
    prevent future use.

    Returns:
      True if a token was found and then removed from the token
      store. False if the token was not in the TokenStore.
    """
    token_found = False
    # Collect matching scopes first; deleting while iterating a dict is
    # not allowed.
    scopes_to_delete = []
    for scope, stored_token in self._tokens.iteritems():
      if stored_token == token:
        scopes_to_delete.append(scope)
        token_found = True
    for scope in scopes_to_delete:
      del self._tokens[scope]
    return token_found

  def remove_all_tokens(self):
    # Discard every stored token.
    self._tokens = {}

139
python/atom/url.py Normal file
View File

@ -0,0 +1,139 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import urlparse
import urllib
DEFAULT_PROTOCOL = 'http'
DEFAULT_PORT = 80
def parse_url(url_string):
    """Creates a Url object which corresponds to the URL string.

    This method can accept partial URLs, but it will leave missing
    members of the Url unset.

    Args:
        url_string: str The URL text to be parsed.

    Returns:
        A Url instance with any components present in url_string filled in.
    """
    parts = urlparse.urlparse(url_string)
    url = Url()
    if parts[0]:
        url.protocol = parts[0]
    if parts[1]:
        host_parts = parts[1].split(':')
        if host_parts[0]:
            url.host = host_parts[0]
        if len(host_parts) > 1:
            # NOTE(review): the port is stored as a string, not an int;
            # Url.__cmp__ compares it against the integer DEFAULT_PORT, so
            # an explicit ':80' does not compare equal to an unset port.
            # Left unchanged because callers may rely on the string type.
            url.port = host_parts[1]
    if parts[2]:
        url.path = parts[2]
    if parts[4]:
        param_pairs = parts[4].split('&')
        for pair in param_pairs:
            # Split on the first '=' only, so parameter values that
            # themselves contain '=' (e.g. base64-encoded data) are kept
            # intact instead of being truncated at the second '='.
            pair_parts = pair.split('=', 1)
            if len(pair_parts) > 1:
                url.params[urllib.unquote_plus(pair_parts[0])] = (
                    urllib.unquote_plus(pair_parts[1]))
            elif len(pair_parts) == 1:
                url.params[urllib.unquote_plus(pair_parts[0])] = None
    return url
class Url(object):
    """Represents a URL and implements comparison logic.

    URL strings which are not identical can still be equivalent, so this object
    provides a better interface for comparing and manipulating URLs than
    strings. URL parameters are represented as a dictionary of strings, and
    defaults are used for the protocol (http) and port (80) if not provided.
    """

    def __init__(self, protocol=None, host=None, port=None, path=None,
                 params=None):
        # Every component is optional so that parse_url can leave missing
        # members of a partial URL unset.
        self.protocol = protocol
        self.host = host
        self.port = port
        self.path = path
        self.params = params or {}

    def to_string(self):
        """Returns the URL rebuilt as a single string."""
        # Layout expected by urlparse.urlunparse:
        # (scheme, netloc, path, params, query, fragment).
        url_parts = ['', '', '', '', '', '']
        if self.protocol:
            url_parts[0] = self.protocol
        if self.host:
            if self.port:
                # str() because the port may be held as either an int or a
                # string (parse_url stores it unconverted).
                url_parts[1] = ':'.join((self.host, str(self.port)))
            else:
                url_parts[1] = self.host
        if self.path:
            url_parts[2] = self.path
        if self.params:
            url_parts[4] = self.get_param_string()
        return urlparse.urlunparse(url_parts)

    def get_param_string(self):
        """Returns the query string built from self.params, '&'-joined."""
        param_pairs = []
        for key, value in self.params.iteritems():
            # NOTE(review): a None value (parameter with no '=') is rendered
            # here as the literal string 'None' -- confirm that round-tripping
            # through parse_url/to_string is intended for such parameters.
            param_pairs.append('='.join((urllib.quote_plus(key),
                                         urllib.quote_plus(str(value)))))
        return '&'.join(param_pairs)

    def get_request_uri(self):
        """Returns the path with the parameters escaped and appended."""
        param_string = self.get_param_string()
        if param_string:
            return '?'.join([self.path, param_string])
        else:
            return self.path

    def __cmp__(self, other):
        # Non-Url objects are compared by string form (Python 2 __cmp__
        # protocol: negative/zero/positive result).
        if not isinstance(other, Url):
            return cmp(self.to_string(), str(other))
        difference = 0
        # Compare the protocol, treating an unset protocol as the default
        # ('http') so equivalent URLs compare equal.
        if self.protocol and other.protocol:
            difference = cmp(self.protocol, other.protocol)
        elif self.protocol and not other.protocol:
            difference = cmp(self.protocol, DEFAULT_PROTOCOL)
        elif not self.protocol and other.protocol:
            difference = cmp(DEFAULT_PROTOCOL, other.protocol)
        if difference != 0:
            return difference
        # Compare the host
        difference = cmp(self.host, other.host)
        if difference != 0:
            return difference
        # Compare the port, treating an unset port as the default (80).
        # NOTE(review): parse_url stores ports as strings, so a parsed ':80'
        # compares against the integer 80 with Python 2 mixed-type ordering
        # and is NOT considered equal to an unset port.
        if self.port and other.port:
            difference = cmp(self.port, other.port)
        elif self.port and not other.port:
            difference = cmp(self.port, DEFAULT_PORT)
        elif not self.port and other.port:
            difference = cmp(DEFAULT_PORT, other.port)
        if difference != 0:
            return difference
        # Compare the path
        difference = cmp(self.path, other.path)
        if difference != 0:
            return difference
        # Compare the parameters (dict comparison under Python 2)
        return cmp(self.params, other.params)

    def __str__(self):
        return self.to_string()

View File

@ -0,0 +1,33 @@
"""Secret-key encryption algorithms.
Secret-key encryption algorithms transform plaintext in some way that
is dependent on a key, producing ciphertext. This transformation can
easily be reversed, if (and, hopefully, only if) one knows the key.
The encryption modules here all support the interface described in PEP
272, "API for Block Encryption Algorithms".
If you don't know which algorithm to choose, use AES because it's
standard and has undergone a fair bit of examination.
Crypto.Cipher.AES Advanced Encryption Standard
Crypto.Cipher.ARC2 Alleged RC2
Crypto.Cipher.ARC4 Alleged RC4
Crypto.Cipher.Blowfish
Crypto.Cipher.CAST
Crypto.Cipher.DES The Data Encryption Standard. Very commonly used
in the past, but today its 56-bit keys are too small.
Crypto.Cipher.DES3 Triple DES.
Crypto.Cipher.IDEA
Crypto.Cipher.RC5
Crypto.Cipher.XOR The simple XOR cipher.
"""
__all__ = ['AES', 'ARC2', 'ARC4',
'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5',
'XOR'
]
__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $"

View File

@ -0,0 +1,108 @@
"""HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
This is just a copy of the Python 2.2 HMAC module, modified to work when
used on versions of Python before 2.2.
"""
__revision__ = "$Id: HMAC.py,v 1.5 2002/07/25 17:19:02 z3p Exp $"
import string
def _strxor(s1, s2):
"""Utility method. XOR the two strings s1 and s2 (must have same length).
"""
return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used, so no fixed module-level value can be given here;
# HMAC instances expose their own digest_size attribute instead.
digest_size = None
class HMAC:
    """RFC2104 HMAC class.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.

        key:       key for the keyed hash object.
        msg:       Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. Defaults to the md5 module.
        """
        if digestmod == None:
            import md5
            digestmod = md5
        self.digestmod = digestmod
        # Two independent hash states: inner hashes ipad||message, outer
        # hashes opad||inner-digest, per RFC 2104.
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        try:
            self.digest_size = digestmod.digest_size
        except AttributeError:
            # Older hash modules lack digest_size; derive it from an
            # actual digest instead.
            self.digest_size = len(self.outer.digest())
        # Fixed 64-byte block size and padding constants from RFC 2104.
        blocksize = 64
        ipad = "\x36" * blocksize
        opad = "\x5C" * blocksize
        # Keys longer than one block are first hashed down; every key is
        # then zero-padded to exactly one block before XOR with the pads.
        if len(key) > blocksize:
            key = digestmod.new(key).digest()
        key = key + chr(0) * (blocksize - len(key))
        self.outer.update(_strxor(key, opad))
        self.inner.update(_strxor(key, ipad))
        if (msg):
            self.update(msg)

##    def clear(self):
##        raise NotImplementedError, "clear() method not available in HMAC."

    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # Build a shell instance, then clone both hash states into it so
        # the copy shares nothing mutable with self.
        other = HMAC("")
        other.digestmod = self.digestmod
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data.  The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        # Work on a copy of the outer state so self stays usable.
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        # string.zfill pads each byte to two hex digits (pre-2.2 compatible
        # spelling; the string module is imported at file top).
        return "".join([string.zfill(hex(ord(x))[2:], 2)
                        for x in tuple(self.digest())])
def new(key, msg=None, digestmod=None):
    """Create and return a fresh HMAC hashing object.

    key:       The starting key for the hash.
    msg:       If given, immediately hashed into the object's starting state.
    digestmod: Optional PEP 247 hashing module to use.

    Feed further strings in with the object's update() method and read the
    MAC out at any time with digest() or hexdigest().
    """
    hmac_object = HMAC(key, msg, digestmod)
    return hmac_object

View File

@ -0,0 +1,13 @@
# Just use the MD5 module from the Python standard library

__revision__ = "$Id: MD5.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $"

from md5 import *
import md5

# PEP 247 names the attribute digest_size; older md5 modules exported it
# as digestsize (pulled in by the star import above), so re-export it
# under the standard name and drop the legacy one.
if hasattr(md5, 'digestsize'):
    digest_size = digestsize
    del digestsize

# Don't leave the helper module name in this wrapper's namespace.
del md5

View File

@ -0,0 +1,11 @@
# Just use the SHA module from the Python standard library

__revision__ = "$Id: SHA.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $"

from sha import *
import sha

# PEP 247 names the attribute digest_size; older sha modules exported it
# as digestsize (pulled in by the star import above), so re-export it
# under the standard name and drop the legacy one.
if hasattr(sha, 'digestsize'):
    digest_size = digestsize
    del digestsize

# Don't leave the helper module name in this wrapper's namespace.
del sha

View File

@ -0,0 +1,24 @@
"""Hashing algorithms
Hash functions take arbitrary strings as input, and produce an output
of fixed size that is dependent on the input; it should never be
possible to derive the input data given only the hash function's
output. Hash functions can be used simply as a checksum, or, in
association with a public-key algorithm, can be used to implement
digital signatures.
The hashing modules here all support the interface described in PEP
247, "API for Cryptographic Hash Functions".
Submodules:
Crypto.Hash.HMAC RFC 2104: Keyed-Hashing for Message Authentication
Crypto.Hash.MD2
Crypto.Hash.MD4
Crypto.Hash.MD5
Crypto.Hash.RIPEMD
Crypto.Hash.SHA
"""
__all__ = ['HMAC', 'MD2', 'MD4', 'MD5', 'RIPEMD', 'SHA', 'SHA256']
__revision__ = "$Id: __init__.py,v 1.6 2003/12/19 14:24:25 akuchling Exp $"

View File

@ -0,0 +1,295 @@
"""This file implements all-or-nothing package transformations.
An all-or-nothing package transformation is one in which some text is
transformed into message blocks, such that all blocks must be obtained before
the reverse transformation can be applied. Thus, if any blocks are corrupted
or lost, the original message cannot be reproduced.
An all-or-nothing package transformation is not encryption, although a block
cipher algorithm is used. The encryption key is randomly generated and is
extractable from the message blocks.
This class implements the All-Or-Nothing package transformation algorithm
described in:
Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform"
http://theory.lcs.mit.edu/~rivest/fusion.pdf
"""
__revision__ = "$Id: AllOrNothing.py,v 1.8 2003/02/28 15:23:20 akuchling Exp $"
import operator
import string
from Crypto.Util.number import bytes_to_long, long_to_bytes
class AllOrNothing:
    """Class implementing the All-or-Nothing package transform.

    Methods for subclassing:

        _inventkey(key_size):
            Returns a randomly generated key.  Subclasses can use this to
            implement better random key generating algorithms.  The default
            algorithm is probably not very cryptographically secure.
    """

    def __init__(self, ciphermodule, mode=None, IV=None):
        """AllOrNothing(ciphermodule, mode=None, IV=None)

        ciphermodule is a module implementing the cipher algorithm to
        use.  It must provide the PEP272 interface.

        Note that the encryption key is randomly generated
        automatically when needed.  Optional arguments mode and IV are
        passed directly through to the ciphermodule.new() method; they
        are the feedback mode and initialization vector to use.  All
        three arguments must be the same for the object used to create
        the digest, and to undigest'ify the message blocks.
        """
        self.__ciphermodule = ciphermodule
        self.__mode = mode
        self.__IV = IV
        self.__key_size = ciphermodule.key_size
        # key_size == 0 means the cipher accepts variable-length keys;
        # use 16 bytes (128 bits) in that case.
        if self.__key_size == 0:
            self.__key_size = 16

    # Byte repeated to form K0, the fixed, publically-known key used to
    # encrypt the hash blocks (see digest()).
    __K0digit = chr(0x69)

    def digest(self, text):
        """digest(text:string) : [string]

        Perform the All-or-Nothing package transform on the given
        string.  Output is a list of message blocks describing the
        transformed text, where each block is a string of bit length equal
        to the ciphermodule's block_size.
        """
        # generate a random session key and K0, the key used to encrypt the
        # hash blocks.  Rivest calls this a fixed, publically-known encryption
        # key, but says nothing about the security implications of this key or
        # how to choose it.
        key = self._inventkey(self.__key_size)
        K0 = self.__K0digit * self.__key_size

        # we need two cipher objects here, one that is used to encrypt the
        # message blocks and one that is used to encrypt the hashes.  The
        # former uses the randomly generated key, while the latter uses the
        # well-known key.
        mcipher = self.__newcipher(key)
        hcipher = self.__newcipher(K0)

        # Pad the text so that its length is a multiple of the cipher's
        # block_size.  Pad with trailing spaces, which will be eliminated in
        # the undigest() step.
        block_size = self.__ciphermodule.block_size
        padbytes = block_size - (len(text) % block_size)
        text = text + ' ' * padbytes

        # Run through the algorithm:
        # s: number of message blocks (size of text / block_size)
        # input sequence: m1, m2, ... ms
        # random key K' (`key' in the code)
        # Compute output sequence: m'1, m'2, ... m's' for s' = s + 1
        # Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s
        # Let m's' = K' ^ h1 ^ h2 ^ ... hs
        # where hi = E(K0, m'i ^ i) for i = 1, 2, ... s
        #
        # The one complication I add is that the last message block is hard
        # coded to the number of padbytes added, so that these can be stripped
        # during the undigest() step
        #
        # NOTE: `/` here is Python 2 integer division (len(text) is now an
        # exact multiple of block_size, so the result is exact anyway).
        s = len(text) / block_size
        blocks = []
        hashes = []
        for i in range(1, s+1):
            start = (i-1) * block_size
            end = start + block_size
            mi = text[start:end]
            assert len(mi) == block_size
            cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
            mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock)
            blocks.append(mticki)
            # calculate the hash block for this block
            hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
            hashes.append(bytes_to_long(hi))

        # Add the padbytes length as a message block
        # (relies on `i` retaining its value after the for loop above)
        i = i + 1
        cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
        mticki = padbytes ^ bytes_to_long(cipherblock)
        blocks.append(mticki)

        # calculate this block's hash
        hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
        hashes.append(bytes_to_long(hi))

        # Now calculate the last message block of the sequence 1..s'.  This
        # will contain the random session key XOR'd with all the hash blocks,
        # so that for undigest(), once all the hash blocks are calculated, the
        # session key can be trivially extracted.  Calculating all the hash
        # blocks requires that all the message blocks be received, thus the
        # All-or-Nothing algorithm succeeds.
        mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes)
        blocks.append(mtick_stick)

        # we convert the blocks to strings since in Python, byte sequences are
        # always represented as strings.  This is more consistent with the
        # model that encryption and hash algorithms always operate on strings.
        return map(long_to_bytes, blocks)

    def undigest(self, blocks):
        """undigest(blocks : [string]) : string

        Perform the reverse package transformation on a list of message
        blocks.  Note that the ciphermodule used for both transformations
        must be the same.  blocks is a list of strings of bit length
        equal to the ciphermodule's block_size.
        """
        # better have at least 2 blocks, for the padbytes package and the hash
        # block accumulator
        if len(blocks) < 2:
            raise ValueError, "List must be at least length 2."

        # blocks is a list of strings.  We need to deal with them as long
        # integers
        blocks = map(bytes_to_long, blocks)

        # Calculate the well-known key, to which the hash blocks are
        # encrypted, and create the hash cipher.
        K0 = self.__K0digit * self.__key_size
        hcipher = self.__newcipher(K0)

        # Since we have all the blocks (or this method would have been called
        # prematurely), we can calcualte all the hash blocks.
        hashes = []
        for i in range(1, len(blocks)):
            mticki = blocks[i-1] ^ i
            hi = hcipher.encrypt(long_to_bytes(mticki))
            hashes.append(bytes_to_long(hi))

        # now we can calculate K' (key).  remember the last block contains
        # m's' which we don't include here
        key = blocks[-1] ^ reduce(operator.xor, hashes)

        # and now we can create the cipher object
        mcipher = self.__newcipher(long_to_bytes(key))
        block_size = self.__ciphermodule.block_size

        # And we can now decode the original message blocks
        parts = []
        for i in range(1, len(blocks)):
            cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
            mi = blocks[i-1] ^ bytes_to_long(cipherblock)
            parts.append(mi)

        # The last message block contains the number of pad bytes appended to
        # the original text string, such that its length was an even multiple
        # of the cipher's block_size.  This number should be small enough that
        # the conversion from long integer to integer should never overflow
        padbytes = int(parts[-1])
        # string.join is the pre-str.join spelling kept for old Pythons.
        text = string.join(map(long_to_bytes, parts[:-1]), '')
        return text[:-padbytes]

    def _inventkey(self, key_size):
        # TBD: Not a very secure algorithm.  Eventually, I'd like to use JHy's
        # kernelrand module
        import time
        from Crypto.Util import randpool
        # TBD: key_size * 2 to work around possible bug in RandomPool?
        pool = randpool.RandomPool(key_size * 2)
        # Stir the pool until it holds enough entropy for the key.
        while key_size > pool.entropy:
            pool.add_event()

        # we now have enough entropy in the pool to get a key_size'd key
        return pool.get_bytes(key_size)

    def __newcipher(self, key):
        # Build a cipher object, passing mode/IV only when they were given,
        # since some PEP 272 modules reject extra arguments.
        if self.__mode is None and self.__IV is None:
            return self.__ciphermodule.new(key)
        elif self.__IV is None:
            return self.__ciphermodule.new(key, self.__mode)
        else:
            return self.__ciphermodule.new(key, self.__mode, self.__IV)
# Self-test / demo (Python 2 script): package-transforms this module's own
# docstring, prints the message blocks, undigests them and checks the
# round trip.  Run as: python AllOrNothing.py [-c cipher] [-l] [-h]
if __name__ == '__main__':
    import sys
    import getopt
    import base64

    usagemsg = '''\
Test module usage: %(program)s [-c cipher] [-l] [-h]

Where:
    --cipher module
    -c module
        Cipher module to use.  Default: %(ciphermodule)s

    --aslong
    -l
        Print the encoded message blocks as long integers instead of base64
        encoded strings

    --help
    -h
        Print this help message
'''

    ciphermodule = 'AES'
    aslong = 0

    def usage(code, msg=None):
        # Print an optional error, then the usage text, and exit.
        if msg:
            print msg
        print usagemsg % {'program': sys.argv[0],
                          'ciphermodule': ciphermodule}
        sys.exit(code)

    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   'c:l', ['cipher=', 'aslong'])
    except getopt.error, msg:
        usage(1, msg)

    if args:
        usage(1, 'Too many arguments')

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-c', '--cipher'):
            ciphermodule = arg
        elif opt in ('-l', '--aslong'):
            aslong = 1

    # ugly hack to force __import__ to give us the end-path module
    module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new'])

    a = AllOrNothing(module)
    print 'Original text:\n=========='
    print __doc__
    print '=========='
    msgblocks = a.digest(__doc__)
    print 'message blocks:'
    for i, blk in map(None, range(len(msgblocks)), msgblocks):
        # base64 adds a trailing newline
        print '    %3d' % i,
        if aslong:
            print bytes_to_long(blk)
        else:
            print base64.encodestring(blk)[:-1]
    #
    # get a new undigest-only object so there's no leakage
    b = AllOrNothing(module)
    text = b.undigest(msgblocks)
    if text == __doc__:
        print 'They match!'
    else:
        print 'They differ!'

View File

@ -0,0 +1,229 @@
"""This file implements the chaffing algorithm.
Winnowing and chaffing is a technique for enhancing privacy without requiring
strong encryption. In short, the technique takes a set of authenticated
message blocks (the wheat) and adds a number of chaff blocks which have
randomly chosen data and MAC fields. This means that to an adversary, the
chaff blocks look as valid as the wheat blocks, and so the authentication
would have to be performed on every block. By tailoring the number of chaff
blocks added to the message, the sender can make breaking the message
computationally infeasible. There are many other interesting properties of
the winnow/chaff technique.
For example, say Alice is sending a message to Bob. She packetizes the
message and performs an all-or-nothing transformation on the packets. Then
she authenticates each packet with a message authentication code (MAC). The
MAC is a hash of the data packet, and there is a secret key which she must
share with Bob (key distribution is an exercise left to the reader). She then
adds a serial number to each packet, and sends the packets to Bob.
Bob receives the packets, and using the shared secret authentication key,
authenticates the MACs for each packet. Those packets that have bad MACs are
simply discarded. The remainder are sorted by serial number, and passed
through the reverse all-or-nothing transform. The transform means that an
eavesdropper (say Eve) must acquire all the packets before any of the data can
be read. If even one packet is missing, the data is useless.
There's one twist: by adding chaff packets, Alice and Bob can make Eve's job
much harder, since Eve now has to break the shared secret key, or try every
combination of wheat and chaff packet to read any of the message. The cool
thing is that Bob doesn't need to add any additional code; the chaff packets
are already filtered out because their MACs don't match (in all likelihood --
since the data and MACs for the chaff packets are randomly chosen it is
possible, but very unlikely that a chaff MAC will match the chaff data). And
Alice need not even be the party adding the chaff! She could be completely
unaware that a third party, say Charles, is adding chaff packets to her
messages as they are transmitted.
For more information on winnowing and chaffing see this paper:
Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption"
http://theory.lcs.mit.edu/~rivest/chaffing.txt
"""
__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $"
from Crypto.Util.number import bytes_to_long
class Chaff:
    """Class implementing the chaff adding algorithm.

    Methods for subclasses:

        _randnum(size):
            Returns a randomly generated number with a byte-length equal
            to size.  Subclasses can use this to implement better random
            data and MAC generating algorithms.  The default algorithm is
            probably not very cryptographically secure.  It is most
            important that the chaff data does not contain any patterns
            that can be used to discern it from wheat data without running
            the MAC.
    """

    def __init__(self, factor=1.0, blocksper=1):
        """Chaff(factor:float, blocksper:int)

        factor is the number of message blocks to add chaff to,
        expressed as a percentage between 0.0 and 1.0.  blocksper is
        the number of chaff blocks to include for each block being
        chaffed.  Thus the defaults add one chaff block to every
        message block.  By changing the defaults, you can adjust how
        computationally difficult it could be for an adversary to
        brute-force crack the message.  The difficulty is expressed
        as:

            pow(blocksper, int(factor * number-of-blocks))

        For ease of implementation, when factor < 1.0, only the first
        int(factor*number-of-blocks) message blocks are chaffed.

        Raises ValueError when factor is outside [0.0, 1.0] or
        blocksper is negative.
        """
        # raise ValueError(...) call form instead of the Python2-only
        # "raise ValueError, msg" statement; identical behavior on
        # Python 2, and the module stays parseable by newer tools.
        if not (0.0 <= factor <= 1.0):
            raise ValueError("'factor' must be between 0.0 and 1.0")
        if blocksper < 0:
            raise ValueError("'blocksper' must be zero or more")
        self.__factor = factor
        self.__blocksper = blocksper

    def chaff(self, blocks):
        """chaff( [(serial-number:int, data:string, MAC:string)] )
        : [(int, string, string)]

        Add chaff to message blocks.  blocks is a list of 3-tuples of the
        form (serial-number, data, MAC).

        Chaff is created by choosing a random number of the same
        byte-length as data, and another random number of the same
        byte-length as MAC.  The message block's serial number is
        placed on the chaff block and all the packet's chaff blocks
        are randomly interspersed with the single wheat block.  This
        method then returns a list of 3-tuples of the same form.
        Chaffed blocks will contain multiple instances of 3-tuples
        with the same serial number, but the only way to figure out
        which blocks are wheat and which are chaff is to perform the
        MAC hash and compare values.
        """
        chaffedblocks = []

        # count is the number of blocks to add chaff to.  blocksper is the
        # number of chaff blocks to add per message block that is being
        # chaffed.
        count = len(blocks) * self.__factor
        blocksper = range(self.__blocksper)
        # enumerate() replaces the Python2-only map(None, range(...), blocks)
        # idiom; iteration order and values are identical.
        for i, wheat in enumerate(blocks):
            # it shouldn't matter which of the n blocks we add chaff to, so
            # for ease of implementation, we'll just add them to the first
            # count blocks
            if i < count:
                serial, data, mac = wheat
                datasize = len(data)
                macsize = len(mac)
                addwheat = 1
                # add chaff to this block (a stray `import sys` that sat in
                # this inner loop has been removed -- it was never used)
                for j in blocksper:
                    chaffdata = self._randnum(datasize)
                    chaffmac = self._randnum(macsize)
                    chaff = (serial, chaffdata, chaffmac)
                    # mix up the order, if the 5th bit is on then put the
                    # wheat on the list
                    if addwheat and bytes_to_long(self._randnum(16)) & 0x40:
                        chaffedblocks.append(wheat)
                        addwheat = 0
                    chaffedblocks.append(chaff)
                if addwheat:
                    chaffedblocks.append(wheat)
            else:
                # just add the wheat
                chaffedblocks.append(wheat)
        return chaffedblocks

    def _randnum(self, size):
        # TBD: Not a very secure algorithm.
        # TBD: size * 2 to work around possible bug in RandomPool
        from Crypto.Util import randpool
        pool = randpool.RandomPool(size * 2)
        # BUG FIX: this loop previously spun on `pass` without ever feeding
        # the pool, so it hung forever whenever the pool started with fewer
        # than `size` bytes of entropy.  Stir the pool each iteration, the
        # same way AllOrNothing._inventkey does.
        while size > pool.entropy:
            pool.add_event()
        # we now have enough entropy in the pool to get size bytes of random
        # data... well, probably
        return pool.get_bytes(size)
# Self-test / demo (Python 2 script): MACs a sample text with HMAC-SHA,
# chaffs the blocks, then winnows them again by re-running the MAC and
# checks the recovered text matches the original.
if __name__ == '__main__':
    text = """\
We hold these truths to be self-evident, that all men are created equal, that
they are endowed by their Creator with certain unalienable Rights, that among
these are Life, Liberty, and the pursuit of Happiness. That to secure these
rights, Governments are instituted among Men, deriving their just powers from
the consent of the governed. That whenever any Form of Government becomes
destructive of these ends, it is the Right of the People to alter or to
abolish it, and to institute new Government, laying its foundation on such
principles and organizing its powers in such form, as to them shall seem most
likely to effect their Safety and Happiness.
"""
    print 'Original text:\n=========='
    print text
    print '=========='

    # first transform the text into packets
    blocks = [] ; size = 40
    for i in range(0, len(text), size):
        blocks.append( text[i:i+size] )

    # now get MACs for all the text blocks.  The key is obvious...
    print 'Calculating MACs...'
    from Crypto.Hash import HMAC, SHA
    key = 'Jefferson'
    macs = [HMAC.new(key, block, digestmod=SHA).digest()
            for block in blocks]

    assert len(blocks) == len(macs)

    # put these into a form acceptable as input to the chaffing procedure
    source = []
    m = map(None, range(len(blocks)), blocks, macs)
    print m
    for i, data, mac in m:
        source.append((i, data, mac))

    # now chaff these
    print 'Adding chaff...'
    c = Chaff(factor=0.5, blocksper=2)
    chaffed = c.chaff(source)

    from base64 import encodestring

    # print the chaffed message blocks.  meanwhile, separate the wheat from
    # the chaff
    wheat = []
    print 'chaffed message blocks:'
    for i, data, mac in chaffed:
        # do the authentication
        h = HMAC.new(key, data, digestmod=SHA)
        pmac = h.digest()
        if pmac == mac:
            tag = '-->'
            wheat.append(data)
        else:
            tag = '   '
        # base64 adds a trailing newline
        print tag, '%3d' % i, \
              repr(data), encodestring(mac)[:-1]

    # now decode the message packets and check it against the original text
    print 'Undigesting wheat...'
    newtext = "".join(wheat)
    if newtext == text:
        print 'They match!'
    else:
        print 'They differ!'

View File

@ -0,0 +1,17 @@
"""Cryptographic protocols
Implements various cryptographic protocols. (Don't expect to find
network protocols here.)
Crypto.Protocol.AllOrNothing Transforms a message into a set of message
blocks, such that the blocks can be
recombined to get the message back.
Crypto.Protocol.Chaffing Takes a set of authenticated message blocks
(the wheat) and adds a number of
randomly generated blocks (the chaff).
"""
__all__ = ['AllOrNothing', 'Chaffing']
__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:23:21 akuchling Exp $"

View File

@ -0,0 +1,238 @@
#
# DSA.py : Digital Signature Algorithm
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $"
from Crypto.PublicKey.pubkey import *
from Crypto.Util import number
from Crypto.Util.number import bytes_to_long, long_to_bytes
from Crypto.Hash import SHA
try:
from Crypto.PublicKey import _fastmath
except ImportError:
_fastmath = None
class error (Exception):
    """Exception raised by the DSA routines for key-generation and
    parameter errors (replaced by _fastmath.error when the C module
    is in use -- see the bottom of this file)."""
    pass
def generateQ(randfunc):
    """Generate the 160-bit prime q and its seed (FIPS 186 style).

    randfunc is a callable returning the requested number of random bytes.
    Returns the tuple (S, q): S is the 20-byte random seed, q a 160-bit
    prime derived from it.  Raises `error` if the candidate drifted out
    of the 160-bit range while searching for a prime.
    """
    S=randfunc(20)
    hash1=SHA.new(S).digest()
    hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest()
    q = bignum(0)
    # Build the candidate from SHA(S) XOR SHA(S+1), forcing the top bit
    # (so q is full 160-bit size) and the bottom bit (so q is odd).
    for i in range(0,20):
        c=ord(hash1[i])^ord(hash2[i])
        if i==0:
            c=c | 128
        if i==19:
            c= c | 1
        q=q*256+c
    # Walk upward to the next prime.
    while (not isPrime(q)):
        q=q+2
    if pow(2,159L) < q < pow(2,160L):
        return S, q
    raise error, 'Bad q value generated'
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)

    Generate a DSA key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.

    Raises `error` if bits < 160 (q is always a 160-bit prime).
    """
    if bits<160:
        raise error, 'Key length <160 bits'
    obj=DSAobj()
    # Generate string S and prime q
    if progress_func:
        progress_func('p,q\n')
    # FIPS 186-style search: derive candidate primes p from the seed S
    # until one of up to 4096 multiples of 2q is prime; otherwise reseed.
    while (1):
        S, obj.q = generateQ(randfunc)
        n=(bits-1)/160    # Python 2 integer division
        C, N, V = 0, 2, {}
        b=(obj.q >> 5) & 15
        powb=pow(bignum(2), b)
        powL1=pow(bignum(2), bits-1)
        while C<4096:
            # Assemble a bits-wide candidate from SHA digests of the seed.
            for k in range(0, n+1):
                V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
            W=V[n] % powb
            for k in range(n-1, -1, -1):
                W=(W<<160L)+V[k]
            X=W+powL1
            # Force p = 1 (mod 2q) so that q divides p-1.
            p=X-(X%(2*obj.q)-1)
            if powL1<=p and isPrime(p):
                break
            C, N = C+1, N+n+1
        if C<4096:
            break
        if progress_func:
            progress_func('4096 multiples failed\n')

    obj.p = p
    power=(p-1)/obj.q
    if progress_func:
        progress_func('h,g\n')
    # Find a generator g of the order-q subgroup.
    while (1):
        h=bytes_to_long(randfunc(bits)) % (p-1)
        g=pow(h, power, p)
        if 1<h<p-1 and g>1:
            break
    obj.g=g
    if progress_func:
        progress_func('x,y\n')
    # Private key x in (0, q); public key y = g^x mod p.
    while (1):
        x=bytes_to_long(randfunc(20))
        if 0 < x < obj.q:
            break
    obj.x, obj.y = x, pow(g, x, p)
    return obj
def construct(tuple):
    """construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj

    Construct a DSA object from a 4- or 5-tuple of numbers, in keydata
    order: (y, g, p, q) for a public key, plus x for a private key.
    Raises `error` on any other tuple length.
    """
    # NOTE(review): the parameter shadows the builtin `tuple`; kept
    # because the signature is part of the public API.
    obj=DSAobj()
    if len(tuple) not in [4,5]:
        raise error, 'argument for construct() wrong length'
    for i in range(len(tuple)):
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    return obj
class DSAobj(pubkey):
    """Pure-Python DSA key object (see pubkey for the public API)."""
    # Attribute names, in the order construct() fills them; x is the
    # private component and may be absent.
    keydata=['y', 'g', 'p', 'q', 'x']

    def _encrypt(self, s, Kstr):
        # DSA is signature-only.
        raise error, 'DSA algorithm cannot encrypt data'

    def _decrypt(self, s):
        # DSA is signature-only.
        raise error, 'DSA algorithm cannot decrypt data'

    def _sign(self, M, K):
        """Sign integer digest M with per-signature nonce K; returns (r, s)."""
        if (K<2 or self.q<=K):
            raise error, 'K is not between 2 and q'
        r=pow(self.g, K, self.p) % self.q
        s=(inverse(K, self.q)*(M+self.x*r)) % self.q
        return (r,s)

    def _verify(self, M, sig):
        """Return 1 if sig=(r, s) is a valid signature of digest M, else 0."""
        r, s = sig
        # Reject signatures outside the valid (0, q) range outright.
        if r<=0 or r>=self.q or s<=0 or s>=self.q:
            return 0
        w=inverse(s, self.q)
        u1, u2 = (M*w) % self.q, (r*w) % self.q
        v1 = pow(self.g, u1, self.p)
        v2 = pow(self.y, u2, self.p)
        v = ((v1*v2) % self.p)
        v = v % self.q
        if v==r:
            return 1
        return 0

    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return number.size(self.p) - 1

    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        if hasattr(self, 'x'):
            return 1
        else:
            return 0

    def can_sign(self):
        """Return a Boolean value recording whether this algorithm can generate signatures."""
        return 1

    def can_encrypt(self):
        """Return a Boolean value recording whether this algorithm can encrypt data."""
        return 0

    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.y, self.g, self.p, self.q))
# Backwards-compatible alias: `object` is the historical public name for
# the key class (predates new-style classes; shadows the builtin).
object=DSAobj

# Keep handles to the pure-Python implementations so the _fastmath
# switch at the bottom of the file can still reach them.
generate_py = generate
construct_py = construct
class DSAobj_c(pubkey):
    """DSA key object backed by the _fastmath C extension; wraps a C key
    and forwards operations to it."""
    keydata = ['y', 'g', 'p', 'q', 'x']

    def __init__(self, key):
        # key is a _fastmath.dsa key object.
        self.key = key

    def __getattr__(self, attr):
        # Expose the numeric components of the underlying C key as if
        # they were attributes of this wrapper.
        if attr in self.keydata:
            return getattr(self.key, attr)
        else:
            # NOTE(review): this branch returns None rather than the
            # looked-up value (missing `return`); it is almost dead code
            # since __getattr__ only runs after normal lookup fails, but
            # confirm before relying on it.
            if self.__dict__.has_key(attr):
                self.__dict__[attr]
            else:
                raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr)

    def __getstate__(self):
        # Pickle support: serialize only the numeric key components.
        d = {}
        for k in self.keydata:
            if hasattr(self.key, k):
                d[k]=getattr(self.key, k)
        return d

    def __setstate__(self, state):
        # Rebuild the C key from pickled components; include x only for
        # private keys.
        y,g,p,q = state['y'], state['g'], state['p'], state['q']
        if not state.has_key('x'):
            self.key = _fastmath.dsa_construct(y,g,p,q)
        else:
            x = state['x']
            self.key = _fastmath.dsa_construct(y,g,p,q,x)

    def _sign(self, M, K):
        return self.key._sign(M, K)

    def _verify(self, M, (r, s)):
        # (Python 2 tuple-parameter syntax.)
        return self.key._verify(M, r, s)

    def size(self):
        return self.key.size()

    def has_private(self):
        return self.key.has_private()

    def publickey(self):
        return construct_c((self.key.y, self.key.g, self.key.p, self.key.q))

    def can_sign(self):
        return 1

    def can_encrypt(self):
        return 0
def generate_c(bits, randfunc, progress_func=None):
    """Like generate(), but returns a _fastmath-backed DSAobj_c key.

    Key generation itself still runs in pure Python; only the resulting
    key is re-wrapped around the C implementation.
    """
    obj = generate_py(bits, randfunc, progress_func)
    y,g,p,q,x = obj.y, obj.g, obj.p, obj.q, obj.x
    return construct_c((y,g,p,q,x))
def construct_c(tuple):
    """Build a DSAobj_c from a (y, g, p, q[, x]) tuple via _fastmath."""
    key = apply(_fastmath.dsa_construct, tuple)
    return DSAobj_c(key)
# When the C accelerator module imported successfully at the top of the
# file, transparently replace the pure-Python entry points (and the error
# class) with the _fastmath-backed versions.
if _fastmath:
    #print "using C version of DSA"
    generate = generate_c
    construct = construct_c
    error = _fastmath.error

View File

@ -0,0 +1,132 @@
#
# ElGamal.py : ElGamal encryption/decryption and signatures
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: ElGamal.py,v 1.9 2003/04/04 19:44:26 akuchling Exp $"
from Crypto.PublicKey.pubkey import *
from Crypto.Util import number
class error(Exception):
    """Exception raised on ElGamal key-usage errors."""
# Generate an ElGamal key with N bits
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)
    Generate an ElGamal key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    obj=ElGamalobj()
    # Generate prime modulus p
    if progress_func:
        progress_func('p\n')
    obj.p=bignum(getPrime(bits, randfunc))
    # Generate generator g as a prime somewhat smaller than p
    if progress_func:
        progress_func('g\n')
    size=bits-1-(ord(randfunc(1)) & 63) # g will be from 1--64 bits smaller than p
    if size<1:
        size=bits-1
    while (1):
        obj.g=bignum(getPrime(size, randfunc))
        if obj.g < obj.p:
            break
        # Candidate was too large: vary the size and retry.
        size=(size+1) % bits
        if size==0:
            size=4
    # Generate private exponent x
    if progress_func:
        progress_func('x\n')
    while (1):
        size=bits-1-ord(randfunc(1)) # x will be from 1 to 256 bits smaller than p
        # Require a size of at least 3 bits before generating x.
        if size>2:
            break
    while (1):
        obj.x=bignum(getPrime(size, randfunc))
        if obj.x < obj.p:
            break
        # Same retry scheme as for g above.
        size = (size+1) % bits
        if size==0:
            size=4
    if progress_func:
        progress_func('y\n')
    # Public component: y = g**x mod p.
    obj.y = pow(obj.g, obj.x, obj.p)
    return obj
def construct(tuple):
    """construct(tuple:(long,long,long)|(long,long,long,long)) : ElGamalobj

    Construct an ElGamal key from a 3-tuple (p, g, y) of public
    components, or a 4-tuple (p, g, y, x) that also includes the
    private key.  (The docstring previously advertised 4/5-element
    tuples, contradicting the length check below; the code accepts
    3 or 4 elements.)  The parameter name shadows the builtin but is
    kept for backward compatibility.
    """
    obj = ElGamalobj()
    if len(tuple) not in [3, 4]:
        # Call-form raise: same behavior as the old statement form,
        # but also valid Python 3 syntax.
        raise error('argument for construct() wrong length')
    for i in range(len(tuple)):
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    return obj
class ElGamalobj(pubkey):
    """ElGamal key object.

    Key components (set by generate()/construct()):
      p -- prime modulus
      g -- generator
      y -- public component (g**x mod p)
      x -- private exponent (present only for private keys)
    """
    keydata=['p', 'g', 'y', 'x']
    def _encrypt(self, M, K):
        # Raw ElGamal encryption: (g**K, M*y**K) mod p.
        a=pow(self.g, K, self.p)
        b=( M*pow(self.y, K, self.p) ) % self.p
        return ( a,b )
    def _decrypt(self, M):
        # M is the (a, b) ciphertext pair; plaintext = b / a**x mod p.
        if (not hasattr(self, 'x')):
            raise error, 'Private key not available in this object'
        ax=pow(M[0], self.x, self.p)
        plaintext=(M[1] * inverse(ax, self.p ) ) % self.p
        return plaintext
    def _sign(self, M, K):
        # K must be invertible mod p-1 for the signature equation.
        if (not hasattr(self, 'x')):
            raise error, 'Private key not available in this object'
        p1=self.p-1
        if (GCD(K, p1)!=1):
            raise error, 'Bad K value: GCD(K,p-1)!=1'
        a=pow(self.g, K, self.p)
        t=(M-self.x*a) % p1
        while t<0: t=t+p1
        b=(t*inverse(K, p1)) % p1
        return (a, b)
    def _verify(self, M, sig):
        # NOTE(review): no range check that 0 < sig[0] < p is performed
        # here; verify whether callers are expected to enforce it.
        v1=pow(self.y, sig[0], self.p)
        v1=(v1*pow(sig[0], sig[1], self.p)) % self.p
        v2=pow(self.g, M, self.p)
        if v1==v2:
            return 1
        return 0
    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return number.size(self.p) - 1
    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        if hasattr(self, 'x'):
            return 1
        else:
            return 0
    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.p, self.g, self.y))
# Backward-compatible alias (shadows the 'object' builtin at module scope).
object=ElGamalobj

View File

@ -0,0 +1,256 @@
#
# RSA.py : RSA encryption/decryption
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: RSA.py,v 1.20 2004/05/06 12:52:54 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util import number
try:
from Crypto.PublicKey import _fastmath
except ImportError:
_fastmath = None
class error(Exception):
    """Exception raised on RSA key-usage errors."""
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)
    Generate an RSA key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    obj=RSAobj()
    # Generate the prime factors of n
    if progress_func:
        progress_func('p,q\n')
    p = q = 1L
    # Keep drawing prime pairs until their product reaches the
    # requested modulus size.  (bits/2 is Python 2 integer division.)
    while number.size(p*q) < bits:
        p = pubkey.getPrime(bits/2, randfunc)
        q = pubkey.getPrime(bits/2, randfunc)
    # p shall be smaller than q (for calc of u)
    if p > q:
        (p, q)=(q, p)
    obj.p = p
    obj.q = q
    if progress_func:
        progress_func('u\n')
    # CRT coefficient: u = p**-1 mod q.
    obj.u = pubkey.inverse(obj.p, obj.q)
    obj.n = obj.p*obj.q
    # Standard public exponent F4 = 65537.
    obj.e = 65537L
    if progress_func:
        progress_func('d\n')
    # Private exponent: d = e**-1 mod (p-1)(q-1).
    obj.d=pubkey.inverse(obj.e, (obj.p-1)*(obj.q-1))
    assert bits <= 1+obj.size(), "Generated key is too small"
    return obj
def construct(tuple):
    """construct(tuple:(long,...)) : RSAobj

    Construct an RSA object from a 2-, 3-, 5-, or 6-tuple of numbers:
    (n, e), (n, e, d), (n, e, d, p, q), or (n, e, d, p, q, u).  When p
    and q are supplied they are swapped if necessary so that p < q, and
    the CRT coefficient u = p**-1 mod q is computed when absent.  (The
    original docstring's signature line was malformed/unbalanced.)  The
    parameter name shadows the builtin but is kept for backward
    compatibility.
    """
    obj = RSAobj()
    if len(tuple) not in [2, 3, 5, 6]:
        # Call-form raise: same behavior, also valid Python 3 syntax.
        raise error('argument for construct() wrong length')
    for i in range(len(tuple)):
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    if len(tuple) >= 5:
        # Ensure p is smaller than q (needed for the meaning of u).
        if obj.p > obj.q:
            (obj.p, obj.q) = (obj.q, obj.p)
    if len(tuple) == 5:
        # u not supplied, so we're going to have to compute it.
        obj.u = pubkey.inverse(obj.p, obj.q)
    return obj
class RSAobj(pubkey.pubkey):
    """Pure-Python RSA key object.

    Key components: n (modulus), e (public exponent), d (private
    exponent), p, q (prime factors, p < q), u (p**-1 mod q).  Only n
    and e are required for a public key.

    NOTE(review): these are raw ("textbook") RSA primitives with no
    padding; confirm callers apply padding where required.
    """
    keydata = ['n', 'e', 'd', 'p', 'q', 'u']
    def _encrypt(self, plaintext, K=''):
        # K is ignored; RSA needs no random parameter.
        if self.n<=plaintext:
            raise error, 'Plaintext too large'
        return (pow(plaintext, self.e, self.n),)
    def _decrypt(self, ciphertext):
        if (not hasattr(self, 'd')):
            raise error, 'Private key not available in this object'
        if self.n<=ciphertext[0]:
            raise error, 'Ciphertext too large'
        return pow(ciphertext[0], self.d, self.n)
    def _sign(self, M, K=''):
        # RSA signing is decryption with the private exponent.
        return (self._decrypt((M,)),)
    def _verify(self, M, sig):
        # Verification: re-encrypt the signature and compare with M.
        m2=self._encrypt(sig[0])
        if m2[0]==M:
            return 1
        else: return 0
    def _blind(self, M, B):
        # Blind M with factor B: M * B**e mod n.
        tmp = pow(B, self.e, self.n)
        return (M * tmp) % self.n
    def _unblind(self, M, B):
        # Remove the blinding factor: M * B**-1 mod n.
        tmp = pubkey.inverse(B, self.n)
        return (M * tmp) % self.n
    def can_blind (self):
        """can_blind() : bool
        Return a Boolean value recording whether this algorithm can
        blind data.  (This does not imply that this
        particular key object has the private information required to
        to blind a message.)
        """
        return 1
    def size(self):
        """size() : int
        Return the maximum number of bits that can be handled by this key.
        """
        return number.size(self.n) - 1
    def has_private(self):
        """has_private() : bool
        Return a Boolean denoting whether the object contains
        private components.
        """
        if hasattr(self, 'd'):
            return 1
        else: return 0
    def publickey(self):
        """publickey(): RSAobj
        Return a new key object containing only the public key information.
        """
        return construct((self.n, self.e))
class RSAobj_c(pubkey.pubkey):
    """Wrapper presenting a _fastmath (C) RSA key through the pubkey interface.

    The key material lives in self.key (a _fastmath RSA key object);
    the numeric components are proxied to it via __getattr__.
    """
    keydata = ['n', 'e', 'd', 'p', 'q', 'u']

    def __init__(self, key):
        self.key = key

    def __getattr__(self, attr):
        # Proxy key components to the underlying C key object.
        if attr in self.keydata:
            return getattr(self.key, attr)
        else:
            if attr in self.__dict__:
                # BUGFIX: the original performed this lookup but never
                # returned the value, silently yielding None.
                return self.__dict__[attr]
            else:
                raise AttributeError('%s instance has no attribute %s' % (self.__class__, attr))

    def __getstate__(self):
        # Pickle only the numeric components actually present.
        d = {}
        for k in self.keydata:
            if hasattr(self.key, k):
                d[k] = getattr(self.key, k)
        return d

    def __setstate__(self, state):
        # Rebuild the C key with as many components as were pickled:
        # (n, e) public, (n, e, d) private, or the full CRT form.
        n, e = state['n'], state['e']
        if 'd' not in state:
            self.key = _fastmath.rsa_construct(n, e)
        else:
            d = state['d']
            if 'q' not in state:
                self.key = _fastmath.rsa_construct(n, e, d)
            else:
                p, q, u = state['p'], state['q'], state['u']
                self.key = _fastmath.rsa_construct(n, e, d, p, q, u)

    def _encrypt(self, plain, K):
        # K is ignored; RSA needs no random parameter.
        return (self.key._encrypt(plain),)

    def _decrypt(self, cipher):
        return self.key._decrypt(cipher[0])

    def _sign(self, M, K):
        return (self.key._sign(M),)

    def _verify(self, M, sig):
        return self.key._verify(M, sig[0])

    def _blind(self, M, B):
        return self.key._blind(M, B)

    def _unblind(self, M, B):
        return self.key._unblind(M, B)

    def can_blind (self):
        return 1

    def size(self):
        return self.key.size()

    def has_private(self):
        return self.key.has_private()

    def publickey(self):
        """Return a new key object holding only (n, e)."""
        return construct_c((self.key.n, self.key.e))
def generate_c(bits, randfunc, progress_func = None):
    """Generate an RSA key of length 'bits' directly as a C-backed
    RSAobj_c, using 'randfunc' for random data and 'progress_func',
    if present, to report progress.  Mirrors the pure-Python
    generate() above."""
    # Generate the prime factors of n
    if progress_func:
        progress_func('p,q\n')
    p = q = 1L
    # Keep drawing prime pairs until their product reaches the
    # requested modulus size.  (bits/2 is Python 2 integer division.)
    while number.size(p*q) < bits:
        p = pubkey.getPrime(bits/2, randfunc)
        q = pubkey.getPrime(bits/2, randfunc)
    # p shall be smaller than q (for calc of u)
    if p > q:
        (p, q)=(q, p)
    if progress_func:
        progress_func('u\n')
    # CRT coefficient u = p**-1 mod q, then the remaining components.
    u=pubkey.inverse(p, q)
    n=p*q
    e = 65537L
    if progress_func:
        progress_func('d\n')
    d=pubkey.inverse(e, (p-1)*(q-1))
    key = _fastmath.rsa_construct(n,e,d,p,q,u)
    obj = RSAobj_c(key)
    assert bits <= 1+obj.size(), "Generated key is too small"
    return obj
def construct_c(tuple):
    """Build an RSAobj_c from a tuple of key components (see construct()
    for the accepted tuple shapes).  The parameter name shadows the
    builtin but is kept for backward compatibility.
    """
    # apply() is deprecated (and removed in Python 3); argument
    # unpacking is the equivalent modern form.
    key = _fastmath.rsa_construct(*tuple)
    return RSAobj_c(key)
# Backward-compatible module aliases; the pure-Python implementations
# remain reachable as generate_py / construct_py even when the C-backed
# versions below take over the public names.
object = RSAobj
generate_py = generate
construct_py = construct
# If the _fastmath C extension is available, use the C implementations.
if _fastmath:
    #print "using C version of RSA"
    generate = generate_c
    construct = construct_c
    error = _fastmath.error

View File

@ -0,0 +1,17 @@
"""Public-key encryption and signature algorithms.

Public-key encryption uses two different keys, one for encryption and
one for decryption.  The encryption key can be made public, and the
decryption key is kept private.  Many public-key algorithms can also
be used to sign messages, and some can *only* be used for signatures.

Submodules:
Crypto.PublicKey.DSA      Digital Signature Algorithm (signature only)
Crypto.PublicKey.ElGamal  ElGamal (signing and encryption)
Crypto.PublicKey.RSA      RSA (signing, encryption, and blinding)
Crypto.PublicKey.qNEW     qNEW (signature only)
"""
__all__ = ['RSA', 'DSA', 'ElGamal', 'qNEW']
__revision__ = "$Id: __init__.py,v 1.4 2003/04/03 20:27:13 akuchling Exp $"

View File

@ -0,0 +1,172 @@
#
# pubkey.py : Internal functions for public key operations
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: pubkey.py,v 1.11 2003/04/03 20:36:14 akuchling Exp $"
import types, warnings
from Crypto.Util.number import *
# Basic public key class
class pubkey:
    """Abstract base class for public-key objects.

    Subclasses implement the raw primitives (_encrypt, _decrypt, _sign,
    _verify, _blind, _unblind) and set 'keydata' to the list of their
    numeric key attributes.  This class supplies the public wrappers
    that convert between byte strings and long integers before routing
    to those primitives.
    """
    def __init__(self):
        pass

    def __getstate__(self):
        """To keep key objects platform-independent, the key data is
        converted to standard Python long integers before being
        written out.  It will then be reconverted as necessary on
        restoration."""
        d=self.__dict__
        for key in self.keydata:
            if key in d: d[key]=long(d[key])
        return d

    def __setstate__(self, d):
        """On unpickling a key object, the key data is converted to the big
        number representation being used, whether that is Python long
        integers, MPZ objects, or whatever."""
        for key in self.keydata:
            if key in d: self.__dict__[key]=bignum(d[key])

    def encrypt(self, plaintext, K):
        """encrypt(plaintext:string|long, K:string|long) : tuple
        Encrypt the string or integer plaintext.  K is a random
        parameter required by some algorithms.
        """
        wasString=0
        if isinstance(plaintext, types.StringType):
            plaintext=bytes_to_long(plaintext) ; wasString=1
        if isinstance(K, types.StringType):
            K=bytes_to_long(K)
        ciphertext=self._encrypt(plaintext, K)
        # Mirror the input type: string in -> string(s) out.
        if wasString: return tuple(map(long_to_bytes, ciphertext))
        else: return ciphertext

    def decrypt(self, ciphertext):
        """decrypt(ciphertext:tuple|string|long): string
        Decrypt 'ciphertext' using this key.
        """
        wasString=0
        if not isinstance(ciphertext, types.TupleType):
            ciphertext=(ciphertext,)
        if isinstance(ciphertext[0], types.StringType):
            ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1
        plaintext=self._decrypt(ciphertext)
        if wasString: return long_to_bytes(plaintext)
        else: return plaintext

    def sign(self, M, K):
        """sign(M : string|long, K:string|long) : tuple
        Return a tuple containing the signature for the message M.
        K is a random parameter required by some algorithms.
        """
        if (not self.has_private()):
            # NOTE(review): 'error' is expected to come from the
            # "from Crypto.Util.number import *" star import -- confirm
            # it is actually defined there.
            raise error('Private key not available in this object')
        if isinstance(M, types.StringType): M=bytes_to_long(M)
        if isinstance(K, types.StringType): K=bytes_to_long(K)
        return self._sign(M, K)

    def verify (self, M, signature):
        """verify(M:string|long, signature:tuple) : bool
        Verify that the signature is valid for the message M;
        returns true if the signature checks out.
        """
        if isinstance(M, types.StringType): M=bytes_to_long(M)
        return self._verify(M, signature)

    # alias to compensate for the old validate() name
    def validate (self, M, signature):
        warnings.warn("validate() method name is obsolete; use verify()",
                      DeprecationWarning)
        # BUGFIX: the original issued the warning but never performed
        # the verification, so validate() always returned None.
        return self.verify(M, signature)

    def blind(self, M, B):
        """blind(M : string|long, B : string|long) : string|long
        Blind message M using blinding factor B.
        """
        wasString=0
        if isinstance(M, types.StringType):
            M=bytes_to_long(M) ; wasString=1
        if isinstance(B, types.StringType): B=bytes_to_long(B)
        blindedmessage=self._blind(M, B)
        if wasString: return long_to_bytes(blindedmessage)
        else: return blindedmessage

    def unblind(self, M, B):
        """unblind(M : string|long, B : string|long) : string|long
        Unblind message M using blinding factor B.
        """
        wasString=0
        if isinstance(M, types.StringType):
            M=bytes_to_long(M) ; wasString=1
        if isinstance(B, types.StringType): B=bytes_to_long(B)
        unblindedmessage=self._unblind(M, B)
        if wasString: return long_to_bytes(unblindedmessage)
        else: return unblindedmessage

    # The following methods will usually be left alone, except for
    # signature-only algorithms.  They both return Boolean values
    # recording whether this key's algorithm can sign and encrypt.
    def can_sign (self):
        """can_sign() : bool
        Return a Boolean value recording whether this algorithm can
        generate signatures.  (This does not imply that this
        particular key object has the private information required to
        to generate a signature.)
        """
        return 1

    def can_encrypt (self):
        """can_encrypt() : bool
        Return a Boolean value recording whether this algorithm can
        encrypt data.  (This does not imply that this
        particular key object has the private information required to
        to decrypt a message.)
        """
        return 1

    def can_blind (self):
        """can_blind() : bool
        Return a Boolean value recording whether this algorithm can
        blind data.  (This does not imply that this
        particular key object has the private information required to
        to blind a message.)
        """
        return 0

    # The following methods will certainly be overridden by
    # subclasses.
    def size (self):
        """size() : int
        Return the maximum number of bits that can be handled by this key.
        """
        return 0

    def has_private (self):
        """has_private() : bool
        Return a Boolean denoting whether the object contains
        private components.
        """
        return 0

    def publickey (self):
        """publickey(): object
        Return a new key object containing only the public information.
        """
        return self

    def __eq__ (self, other):
        """__eq__(other): 0, 1
        Compare us to other for equality.
        """
        return self.__getstate__() == other.__getstate__()

View File

@ -0,0 +1,170 @@
#
# qNEW.py : The q-NEW signature algorithm.
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util.number import *
from Crypto.Hash import SHA
class error(Exception):
    """Exception raised on qNEW key-usage errors."""
# Size of SHA digests in bits; generate() below derives candidate prime
# sizes from it.
HASHBITS = 160     # Size of SHA digests
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)
    Generate a qNEW key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    obj=qNEWobj()
    # Generate prime numbers p and q.  q is a 160-bit prime
    # number.  p is another prime number (the modulus) whose bit
    # size is chosen by the caller, and is generated so that p-1
    # is a multiple of q.
    #
    # Note that only a single seed is used to
    # generate p and q; if someone generates a key for you, you can
    # use the seed to duplicate the key generation.  This can
    # protect you from someone generating values of p,q that have
    # some special form that's easy to break.
    if progress_func:
        progress_func('p,q\n')
    while (1):
        obj.q = getPrime(160, randfunc)
        # assert pow(2, 159L)<obj.q<pow(2, 160L)
        obj.seed = S = long_to_bytes(obj.q)
        C, N, V = 0, 2, {}
        # Compute b and n such that bits-1 = b + n*HASHBITS
        n= (bits-1) / HASHBITS
        b= (bits-1) % HASHBITS ; powb=2L << b
        powL1=pow(long(2), bits-1)
        while C<4096:
            # The V array will contain (bits-1) bits of random
            # data, that are assembled to produce a candidate
            # value for p.
            for k in range(0, n+1):
                V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
            p = V[n] % powb
            for k in range(n-1, -1, -1):
                p= (p << long(HASHBITS) )+V[k]
            p = p+powL1 # Ensure the high bit is set
            # Ensure that p-1 is a multiple of q
            # (p % (2q) - 1 is subtracted, leaving p = 1 mod 2q).
            p = p - (p % (2*obj.q)-1)
            # If p is still the right size, and it's prime, we're done!
            if powL1<=p and isPrime(p):
                break
            # Otherwise, increment the counter and try again
            C, N = C+1, N+n+1
        if C<4096:
            break # Ended early, so exit the while loop
        # All 4096 candidates failed; draw a fresh q and start over.
        if progress_func:
            progress_func('4096 values of p tried\n')
    obj.p = p
    power=(p-1)/obj.q
    # Next parameter: g = h**((p-1)/q) mod p, such that h is any
    # number <p-1, and g>1.  g is kept; h can be discarded.
    if progress_func:
        progress_func('h,g\n')
    while (1):
        h=bytes_to_long(randfunc(bits)) % (p-1)
        g=pow(h, power, p)
        if 1<h<p-1 and g>1:
            break
    obj.g=g
    # x is the private key information, and is
    # just a random number between 0 and q.
    # y=g**x mod p, and is part of the public information.
    if progress_func:
        progress_func('x,y\n')
    while (1):
        x=bytes_to_long(randfunc(20))
        if 0 < x < obj.q:
            break
    obj.x, obj.y=x, pow(g, x, p)
    return obj
# Construct a qNEW object
def construct(tuple):
    """construct(tuple:(long,long,long,long)|(long,long,long,long,long)) : qNEWobj

    Construct a qNEW object from a 4-tuple (p, q, g, y) of public
    components, or a 5-tuple (p, q, g, y, x) that also includes the
    private key.  (The original docstring's signature line was
    truncated, missing its closing parenthesis and return type.)  The
    parameter name shadows the builtin but is kept for backward
    compatibility.
    """
    obj = qNEWobj()
    if len(tuple) not in [4, 5]:
        # Call-form raise: same behavior, also valid Python 3 syntax.
        raise error('argument for construct() wrong length')
    for i in range(len(tuple)):
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    return obj
class qNEWobj(pubkey.pubkey):
    """qNEW signature key object.

    Key components: p (modulus), q (160-bit prime factor of p-1),
    g (generator of the order-q subgroup), y (public, g**x mod p),
    x (private exponent, present only for private keys).
    """
    keydata=['p', 'q', 'g', 'y', 'x']
    def _sign(self, M, K=''):
        # NOTE(review): with the default K='' the comparison below mixes
        # a number and a string (Python 2 orders them arbitrarily but
        # consistently), and pow() would then fail -- callers apparently
        # must always supply a numeric K; confirm.
        if (self.q<=K):
            raise error, 'K is greater than q'
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M>=pow(2,161L):
            raise error, 'Illegal value of M (too large)'
        r=pow(self.g, K, self.p) % self.q
        s=(K- (r*M*self.x % self.q)) % self.q
        return (r,s)
    def _verify(self, M, sig):
        r, s = sig
        # Reject signatures whose components are out of range.
        if r<=0 or r>=self.q or s<=0 or s>=self.q:
            return 0
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M<=0 or M>=pow(2,161L):
            return 0
        v1 = pow(self.g, s, self.p)
        v2 = pow(self.y, M*r, self.p)
        v = ((v1*v2) % self.p)
        v = v % self.q
        if v==r:
            return 1
        return 0
    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return 160
    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        return hasattr(self, 'x')
    def can_sign(self):
        """Return a Boolean value recording whether this algorithm can generate signatures."""
        return 1
    def can_encrypt(self):
        """Return a Boolean value recording whether this algorithm can encrypt data."""
        return 0
    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.p, self.q, self.g, self.y))
# Backward-compatible alias (shadows the 'object' builtin at module scope).
object = qNEWobj

View File

@ -0,0 +1,342 @@
#!/usr/local/bin/python
# rfc1751.py : Converts between 128-bit strings and a human-readable
# sequence of words, as defined in RFC1751: "A Convention for
# Human-Readable 128-bit Keys", by Daniel L. McDonald.
__revision__ = "$Id: RFC1751.py,v 1.6 2003/04/04 15:15:10 akuchling Exp $"
import string, binascii
# Lookup table mapping a nibble value (0-15) to its four-digit binary
# string; used by _key2bin to expand each key byte into eight digits.
binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101',
        6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011',
        12:'1100', 13:'1101', 14:'1110', 15:'1111'}
def _key2bin(s):
    """Convert a key string into a string of '0'/'1' binary digits,
    eight digits per byte (high nibble first)."""
    digits = []
    for ch in s:
        code = ord(ch)
        digits.append(binary[code / 16] + binary[code & 15])
    return ''.join(digits)
def _extract(key, start, length):
    """Extract a bitstring from a string of binary digits, and return its
    numeric value."""
    chunk = key[start:start+length]
    value = 0
    for digit in chunk:
        # Each character is '0' or '1' (ASCII 48/49).
        value = value * 2 + (ord(digit) - 48)
    return value
def key_to_english (key):
    """key_to_english(key:string) : string
    Transform an arbitrary key into a string containing English words.
    The key length must be a multiple of 8.
    """
    english=''
    for index in range(0, len(key), 8): # Loop over 8-byte subkeys
        subkey=key[index:index+8]
        # Compute the parity of the key: sum of all 2-bit groups.
        skbin=_key2bin(subkey) ; p=0
        for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
        # Append parity bits to the subkey (two parity bits in the top
        # of an extra byte), giving 66 bits = six 11-bit word indices.
        skbin=_key2bin(subkey+chr((p<<6) & 255))
        for i in range(0, 64, 11):
            # Each 11-bit group selects one word from the 2048-entry list.
            english=english+wordlist[_extract(skbin, i, 11)]+' '
    return english[:-1] # Remove the trailing space
def english_to_key (str):
    """english_to_key(string):string
    Transform a string into a corresponding key.
    The string must contain words separated by whitespace; the number
    of words must be a multiple of 6.

    Raises ValueError if the embedded parity bits do not match.
    (Note: the parameter name shadows the 'str' builtin.)
    """
    L=string.split(string.upper(str)) ; key=''
    for index in range(0, len(L), 6):
        # Each group of six words encodes 66 bits: a 64-bit subkey
        # plus two parity bits, packed into the 9-byte 'char' buffer.
        sublist=L[index:index+6] ; char=9*[0] ; bits=0
        for i in sublist:
            # The word's position in wordlist is its 11-bit value.
            index = wordlist.index(i)
            shift = (8-(bits+11)%8) %8
            y = index << shift
            # Split the shifted value over up to three adjacent bytes.
            cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff
            if (shift>5):
                char[bits/8] = char[bits/8] | cl
                char[bits/8+1] = char[bits/8+1] | cc
                char[bits/8+2] = char[bits/8+2] | cr
            elif shift>-3:
                char[bits/8] = char[bits/8] | cc
                char[bits/8+1] = char[bits/8+1] | cr
            else: char[bits/8] = char[bits/8] | cr
            bits=bits+11
        subkey=reduce(lambda x,y:x+chr(y), char, '')
        # Check the parity of the resulting key
        skbin=_key2bin(subkey)
        p=0
        for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
        if (p&3) != _extract(skbin, 64, 2):
            raise ValueError, "Parity error in resulting key"
        key=key+subkey[0:8]
    return key
wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD",
"AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA",
"AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK",
"ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE",
"AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM",
"BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET",
"BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO",
"BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT",
"BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT",
"CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY",
"CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN",
"DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG",
"DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB",
"DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO",
"ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE",
"EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW",
"FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR",
"FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP",
"GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO",
"GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD",
"HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM",
"HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT",
"HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE",
"HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL",
"INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT",
"ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET",
"JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT",
"KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB",
"LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE",
"LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT",
"LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG",
"LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW",
"MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT",
"MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG",
"MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED",
"NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD",
"NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF",
"OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL",
"OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT",
"OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD",
"PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG",
"PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT",
"PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB",
"PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT",
"RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM",
"RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB",
"RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM",
"SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET",
"SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY",
"SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY",
"SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN",
"TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE",
"TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP",
"TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP",
"US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS",
"WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT",
"WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE",
"YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT",
"ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS",
"ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE",
"AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA",
"ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN",
"AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW",
"ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA",
"ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM",
"AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW",
"AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL",
"BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM",
"BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK",
"BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH",
"BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT",
"BEAU", "BECK", "BEEF", "BEEN", "BEER",
"BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN",
"BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE",
"BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE",
"BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT",
"BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK",
"BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT",
"BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK",
"BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS",
"BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN",
"BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD",
"BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG",
"BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST",
"BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF",
"CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL",
"CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL",
"CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF",
"CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG",
"CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY",
"CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA",
"COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN",
"COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK",
"COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST",
"COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB",
"CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY",
"CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE",
"DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN",
"DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS",
"DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED",
"DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK",
"DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT",
"DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES",
"DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA",
"DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG",
"DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK",
"DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK",
"DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST",
"EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT",
"EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT",
"EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED",
"FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL",
"FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT",
"FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST",
"FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE",
"FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE",
"FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW",
"FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM",
"FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL",
"FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL",
"FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY",
"FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY",
"FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA",
"GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH",
"GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE",
"GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT",
"GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN",
"GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD",
"GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG",
"GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB",
"GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN",
"GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH",
"GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR",
"HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK",
"HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE",
"HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR",
"HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL",
"HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN",
"HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT",
"HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE",
"HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK",
"HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL",
"HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK",
"HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE",
"HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH",
"INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE",
"ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE",
"JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL",
"JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN",
"JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY",
"JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST",
"JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL",
"KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL",
"KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW",
"KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD",
"KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN",
"LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD",
"LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS",
"LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER",
"LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST",
"LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU",
"LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB",
"LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST",
"LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE",
"LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD",
"LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK",
"LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE",
"LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE",
"MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI",
"MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK",
"MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE",
"MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK",
"MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH",
"MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT",
"MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS",
"MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD",
"MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON",
"MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH",
"MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK",
"MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL",
"NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR",
"NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS",
"NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA",
"NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON",
"NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB",
"OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY",
"OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE",
"ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS",
"OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY",
"OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT",
"RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE",
"RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR",
"RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA",
"REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT",
"RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD",
"ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME",
"ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS",
"ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY",
"RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE",
"RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE",
"SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE",
"SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR",
"SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK",
"SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS",
"SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN",
"SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE",
"SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE",
"SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW",
"SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY",
"SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT",
"SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB",
"SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA",
"SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE",
"SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR",
"STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH",
"SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF",
"SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM",
"TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK",
"TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM",
"TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS",
"TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN",
"THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER",
"TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY",
"TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG",
"TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR",
"TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG",
"TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE",
"TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK",
"TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER",
"USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST",
"VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY",
"VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE",
"WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK",
"WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM",
"WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY",
"WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR",
"WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM",
"WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE",
"WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE",
"WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD",
"WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE",
"YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR",
"YELL", "YOGA", "YOKE" ]
if __name__=='__main__':
data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
('CCAC2AED591056BE4F90FD441C534766',
'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
('EFF81F9BFBC65350920CDD7416DE8009',
'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL')
]
for key, words in data:
print 'Trying key', key
key=binascii.a2b_hex(key)
w2=key_to_english(key)
if w2!=words:
print 'key_to_english fails on key', repr(key), ', producing', str(w2)
k2=english_to_key(words)
if k2!=key:
print 'english_to_key fails on key', repr(key), ', producing', repr(k2)

View File

@ -0,0 +1,16 @@
"""Miscellaneous modules
Contains useful modules that don't belong into any of the
other Crypto.* subpackages.
Crypto.Util.number Number-theoretic functions (primality testing, etc.)
Crypto.Util.randpool Random number generation
Crypto.Util.RFC1751 Converts between 128-bit keys and human-readable
strings of words.
"""
__all__ = ['randpool', 'RFC1751', 'number']
__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:26:00 akuchling Exp $"

View File

@ -0,0 +1,201 @@
#
# number.py : Number-theoretic functions
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: number.py,v 1.13 2003/04/04 18:21:07 akuchling Exp $"

# Alias for the arbitrary-precision integer type; under Python 2 plain
# ints are promoted to long automatically, so callers rarely need this.
bignum = long

# Optional C accelerator for the expensive primality test; fall back to
# the pure-Python implementation when the extension isn't built.
try:
    from Crypto.PublicKey import _fastmath
except ImportError:
    _fastmath = None
# Commented out and replaced with faster versions below
## def long2str(n):
## s=''
## while n>0:
## s=chr(n & 255)+s
## n=n>>8
## return s
## import types
## def str2long(s):
## if type(s)!=types.StringType: return s # Integers will be left alone
## return reduce(lambda x,y : x*256+ord(y), s, 0L)
def size (N):
    """size(N:long) : int
    Return the size of the number N in bits, i.e. the index of the
    highest set bit plus one (so size(0) == 0).
    """
    # Plain literal instead of the old 1L: Python 2 promotes ints to
    # long automatically, and the L suffix is a syntax error on Python 3.
    bits, power = 0, 1
    while N >= power:
        bits += 1
        power = power << 1
    return bits
def getRandomNumber(N, randfunc):
    """getRandomNumber(N:int, randfunc:callable):long
    Return an N-bit random number, with the top bit forced on so the
    result is always exactly N bits long.

    randfunc must return a byte string of the requested length.
    """
    # Explicit floor division: a bare "/" would yield a float byte
    # count under "from __future__ import division" or Python 3.
    S = randfunc(N//8)
    odd_bits = N % 8
    if odd_bits != 0:
        # Draw one extra byte and keep only the leftover high bits.
        char = ord(randfunc(1)) >> (8-odd_bits)
        S = chr(char) + S
    value = bytes_to_long(S)
    value |= 2 ** (N-1)   # Ensure high bit is set (plain int, not 2L)
    assert size(value) >= N
    return value
def GCD(x,y):
    """GCD(x:long, y:long): long
    Return the greatest common divisor of x and y (always >= 0).
    """
    # Euclid's algorithm, run on the absolute values.
    a = abs(x)
    b = abs(y)
    while a > 0:
        a, b = b % a, a
    return b
def inverse(u, v):
    """inverse(u:long, v:long):long
    Return the multiplicative inverse of u mod v: the x in [0, v) with
    (u*x) % v == 1.  u and v must be coprime, otherwise the result is
    meaningless.  (Docstring fixed: it used to read "u:long, u:long".)
    """
    # Extended Euclidean algorithm; only the u-coefficient is tracked.
    # int() replaces the Python-2-only long() -- on Python 2 it still
    # yields an arbitrary-precision value for big inputs.
    u3, v3 = int(u), int(v)
    u1, v1 = 1, 0
    while v3 > 0:
        # Floor division: the old "u3 / v3" breaks under true division.
        q = u3 // v3
        u1, v1 = v1, u1 - v1*q
        u3, v3 = v3, u3 - v3*q
    # Normalize the coefficient into [0, v).
    while u1 < 0:
        u1 = u1 + v
    return u1
# Given a number of bits to generate and a random generation function,
# find a prime number of the appropriate size.
def getPrime(N, randfunc):
    """getPrime(N:int, randfunc:callable):long
    Return a random N-bit prime number.
    """
    # Start from a random odd N-bit candidate and step upward by two
    # until the primality test succeeds.
    candidate = getRandomNumber(N, randfunc) | 1
    while not isPrime(candidate):
        candidate = candidate + 2
    return candidate
def isPrime(N):
    """isPrime(N:long):bool
    Return true if N is prime.

    Trial-divides by the small primes in ``sieve`` first, then falls
    back to the C accelerator when available, and finally to a
    Rabin-Miller probabilistic test using the first seven sieve
    primes as bases.
    """
    if N == 1:
        return 0
    if N in sieve:
        return 1
    for i in sieve:
        if (N % i)==0:
            return 0

    # Use the accelerator if available
    if _fastmath is not None:
        return _fastmath.isPrime(N)

    # Compute the highest bit that's set in N
    N1 = N - 1L
    n = 1L
    while (n<N):
        n=n<<1L
    n = n >> 1L

    # Rabin-Miller test
    # d accumulates pow(a, N1 >> k, N) by scanning N1's bits from the
    # most significant downward; t is the mask for the current bit.
    for c in sieve[:7]:
        a=long(c) ; d=1L ; t=n
        while (t):  # Iterate over the bits in N1
            x=(d*d) % N
            if x==1L and d!=1L and d!=N1:
                return 0  # Square root of 1 found: N is composite
            if N1 & t:
                d=(x*a) % N
            else:
                d=x
            t = t >> 1L
        if d!=1L:
            return 0  # Fermat test failed: N is definitely composite
    return 1
# Small primes used for checking primality; these are all the primes
# less than 256. This should be enough to eliminate most of the odd
# numbers before needing to do a Rabin-Miller test at all.
# (isPrime() also uses the first seven entries as Rabin-Miller bases.)
sieve=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
       61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
       131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
       197, 199, 211, 223, 227, 229, 233, 239, 241, 251]
# Improved conversion functions contributed by Barry Warsaw, after
# careful benchmarking
import struct
def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a big-endian byte string.
    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = ''
    n = long(n)
    pack = struct.pack   # hoist the attribute lookup out of the loop
    # Emit 32 bits at a time, most-significant word first.
    while n > 0:
        s = pack('>I', n & 0xffffffffL) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != '\000':
            break
    else:
        # only happens when n == 0
        s = '\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * '\000' + s
    return s
def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a big-endian byte string to a long integer.
    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0L
    unpack = struct.unpack   # hoist the attribute lookup out of the loop
    length = len(s)
    # Zero-pad the front so the string splits into whole 32-bit words.
    if length % 4:
        extra = (4 - length % 4)
        s = '\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + unpack('>I', s[i:i+4])[0]
    return acc
# For backwards compatibility...
import warnings
def long2str(n, blocksize=0):
    """Deprecated alias for long_to_bytes()."""
    # DeprecationWarning (not the default UserWarning) for consistency
    # with the deprecation shims in randpool.RandomPool.getBytes().
    warnings.warn("long2str() has been replaced by long_to_bytes()",
                  DeprecationWarning)
    return long_to_bytes(n, blocksize)
def str2long(s):
    """Deprecated alias for bytes_to_long()."""
    # DeprecationWarning (not the default UserWarning) for consistency
    # with the deprecation shims in randpool.RandomPool.getBytes().
    warnings.warn("str2long() has been replaced by bytes_to_long()",
                  DeprecationWarning)
    return bytes_to_long(s)

View File

@ -0,0 +1,421 @@
#
# randpool.py : Cryptographically strong random number generation
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: randpool.py,v 1.14 2004/05/06 12:56:54 akuchling Exp $"
import time, array, types, warnings, os.path
from Crypto.Util.number import long_to_bytes
# Use the Windows CryptGenRandom wrapper as an entropy source when the
# optional extension module is available.
try:
    import Crypto.Util.winrandom as winrandom
except ImportError:
    # Narrowed from a bare "except:": only a missing module should be
    # silenced; any other error at import time is a real bug and
    # deserves to propagate.
    winrandom = None

STIRNUM = 3   # number of stir() passes stir_n() uses to wash the pool
class RandomPool:
    """randpool.py : Cryptographically strong random number generation.

    The implementation here is similar to the one in PGP.  To be
    cryptographically strong, it must be difficult to determine the RNG's
    output, whether in the future or the past.  This is done by using
    a cryptographic hash function to "stir" the random data.

    Entropy is gathered in the same fashion as PGP; the highest-resolution
    clock around is read and the data is added to the random number pool.
    A conservative estimate of the entropy is then kept.

    If a cryptographically secure random source is available (/dev/urandom
    on many Unixes, Windows CryptGenRandom on most Windows), then use
    it.

    Instance Attributes:
    bits : int
      Maximum size of pool in bits
    bytes : int
      Maximum size of pool in bytes
    entropy : int
      Number of bits of entropy in this pool.

    Methods:
    add_event([s]) : add some entropy to the pool
    get_bytes(int) : get N bytes of random data
    randomize([N]) : get N bytes of randomness from external source
    """

    def __init__(self, numbytes = 160, cipher=None, hash=None):
        # Default hash is SHA-1; imported lazily so this module loads
        # even in stripped-down installs.
        if hash is None:
            from Crypto.Hash import SHA as hash

        # The cipher argument is vestigial; it was removed from
        # version 1.1 so RandomPool would work even in the limited
        # exportable subset of the code
        if cipher is not None:
            warnings.warn("'cipher' parameter is no longer used")

        if isinstance(hash, types.StringType):
            # ugly hack to force __import__ to give us the end-path module
            hash = __import__('Crypto.Hash.'+hash,
                              None, None, ['new'])
            warnings.warn("'hash' parameter should now be a hashing module")

        self.bytes = numbytes          # maximum pool size in bytes
        self.bits = self.bytes*8       # maximum pool size in bits
        self.entropy = 0               # current entropy estimate, in bits
        self._hash = hash

        # Construct an array to hold the random pool,
        # initializing it to 0.
        self._randpool = array.array('B', [0]*self.bytes)

        self._event1 = self._event2 = 0   # timestamps of the last two events
        self._addPos = 0                  # next write offset into the pool
        self._getPos = hash.digest_size   # next read offset into the pool
        self._lastcounter=time.time()
        self.__counter = 0

        self._measureTickSize() # Estimate timer resolution
        self._randomize()

    def _updateEntropyEstimate(self, nbits):
        # Adjust the entropy estimate by nbits, clamped to [0, self.bits].
        self.entropy += nbits
        if self.entropy < 0:
            self.entropy = 0
        elif self.entropy > self.bits:
            self.entropy = self.bits

    def _randomize(self, N = 0, devname = '/dev/urandom'):
        """_randomize(N, DEVNAME:device-filepath)
        collects N bits of randomness from some entropy source (e.g.,
        /dev/urandom on Unixes that have it, Windows CryptoAPI
        CryptGenRandom, etc)
        DEVNAME is optional, defaults to /dev/urandom.  You can change it
        to /dev/random if you want to block till you get enough
        entropy.
        """
        data = ''
        # N <= 0 means "top the pool up to its full entropy capacity".
        if N <= 0:
            nbytes = int((self.bits - self.entropy)/8+0.5)
        else:
            nbytes = int(N/8+0.5)
        if winrandom:
            # Windows CryptGenRandom provides random data.
            data = winrandom.new().get_bytes(nbytes)
        elif os.path.exists(devname):
            # Many OSes support a /dev/urandom device
            try:
                f=open(devname)
                data=f.read(nbytes)
                f.close()
            except IOError, (num, msg):
                # errno 2 == ENOENT (file not found)
                if num!=2: raise IOError, (num, msg)
                # If the file wasn't found, ignore the error
        if data:
            self._addBytes(data)
            # Entropy estimate: The number of bits of
            # data obtained from the random source.
            self._updateEntropyEstimate(8*len(data))
        self.stir_n()                   # Wash the random pool

    def randomize(self, N=0):
        """randomize(N:int)
        use the class entropy source to get some entropy data.
        This is overridden by KeyboardRandomize().
        """
        return self._randomize(N)

    def stir_n(self, N = STIRNUM):
        """stir_n(N)
        stirs the random pool N times
        """
        for i in xrange(N):
            self.stir()

    def stir (self, s = ''):
        """stir(s:string)
        Mix up the randomness pool.  This will call add_event() twice,
        but out of paranoia the entropy attribute will not be
        increased.  The optional 's' parameter is a string that will
        be hashed with the randomness pool.
        """
        entropy=self.entropy            # Save initial entropy value
        self.add_event()

        # Loop over the randomness pool: hash its contents
        # along with a counter, and add the resulting digest
        # back into the pool.
        for i in range(self.bytes / self._hash.digest_size):
            h = self._hash.new(self._randpool)
            h.update(str(self.__counter) + str(i) + str(self._addPos) + s)
            self._addBytes( h.digest() )
            self.__counter = (self.__counter + 1) & 0xFFFFffffL

        # Restart reads just past the first digest-sized chunk, so freshly
        # stirred output never overlaps the bytes about to be written.
        self._addPos, self._getPos = 0, self._hash.digest_size
        self.add_event()

        # Restore the old value of the entropy.
        self.entropy=entropy

    def get_bytes (self, N):
        """get_bytes(N:int) : string
        Return N bytes of random data.

        Output bytes are digests of successive pool windows, never the
        raw pool contents; the pool is re-stirred on wrap-around.
        """
        s=''
        i, pool = self._getPos, self._randpool
        h=self._hash.new()
        dsize = self._hash.digest_size
        num = N
        while num > 0:
            h.update( self._randpool[i:i+dsize] )
            s = s + h.digest()
            num = num - dsize
            i = (i + dsize) % self.bytes
            if i<dsize:
                self.stir()
                i=self._getPos
        self._getPos = i
        # Handing out N bytes costs (at most) 8*N bits of entropy.
        self._updateEntropyEstimate(- 8*N)
        return s[:N]

    def add_event(self, s=''):
        """add_event(s:string)
        Add an event to the random pool.  The current time is stored
        between calls and used to estimate the entropy.  The optional
        's' parameter is a string that will also be XORed into the pool.
        Returns the estimated number of additional bits of entropy gain.
        """
        event = time.time()*1000
        delta = self._noise()
        s = (s + long_to_bytes(event) +
             4*chr(0xaa) + long_to_bytes(delta) )
        self._addBytes(s)
        if event==self._event1 and event==self._event2:
            # If events are coming too closely together, assume there's
            # no effective entropy being added.
            bits=0
        else:
            # Count the number of bits in delta, and assume that's the entropy.
            bits=0
            while delta:
                delta, bits = delta>>1, bits+1
            if bits>8: bits=8
        self._event1, self._event2 = event, self._event1
        self._updateEntropyEstimate(bits)
        return bits

    # Private functions
    def _noise(self):
        # Adds a bit of noise to the random pool, by adding in the
        # current time and CPU usage of this process.
        # The difference from the previous call to _noise() is taken
        # in an effort to estimate the entropy.
        t=time.time()
        delta = (t - self._lastcounter)/self._ticksize*1e6
        self._lastcounter = t
        self._addBytes(long_to_bytes(long(1000*time.time())))
        self._addBytes(long_to_bytes(long(1000*time.clock())))
        self._addBytes(long_to_bytes(long(1000*time.time())))
        self._addBytes(long_to_bytes(long(delta)))

        # Reduce delta to a maximum of 8 bits so we don't add too much
        # entropy as a result of this call.
        delta=delta % 0xff
        return int(delta)

    def _measureTickSize(self):
        # _measureTickSize() tries to estimate a rough average of the
        # resolution of time that you can see from Python.  It does
        # this by measuring the time 100 times, computing the delay
        # between measurements, and taking the median of the resulting
        # list.  (We also hash all the times and add them to the pool)
        interval = [None] * 100
        h = self._hash.new(`(id(self),id(interval))`)

        # Compute 100 differences
        t=time.time()
        h.update(`t`)
        i = 0
        j = 0
        while i < 100:
            t2=time.time()
            h.update(`(i,j,t2)`)
            j += 1
            delta=int((t2-t)*1e6)
            if delta:
                # Only keep non-zero deltas: a zero just means the clock
                # didn't tick between two reads.
                interval[i] = delta
                i += 1
                t=t2

        # Take the median of the array of intervals
        interval.sort()
        self._ticksize=interval[len(interval)/2]
        h.update(`(interval,self._ticksize)`)
        # mix in the measurement times and wash the random pool
        self.stir(h.digest())

    def _addBytes(self, s):
        "XOR the contents of the string S into the random pool"
        i, pool = self._addPos, self._randpool
        for j in range(0, len(s)):
            pool[i]=pool[i] ^ ord(s[j])
            i=(i+1) % self.bytes
        self._addPos = i

    # Deprecated method names: remove in PCT 2.1 or later.
    def getBytes(self, N):
        """Deprecated alias for get_bytes()."""
        warnings.warn("getBytes() method replaced by get_bytes()",
                      DeprecationWarning)
        return self.get_bytes(N)

    def addEvent (self, event, s=""):
        """Deprecated alias for add_event()."""
        warnings.warn("addEvent() method replaced by add_event()",
                      DeprecationWarning)
        return self.add_event(s + str(event))
class PersistentRandomPool (RandomPool):
    """RandomPool that can seed itself from, and save itself to, a file.

    The pool is stirred when loaded and both before and after saving,
    so the file never holds bytes that were (or will be) handed out
    directly by get_bytes().
    """
    def __init__ (self, filename=None, *args, **kwargs):
        RandomPool.__init__(self, *args, **kwargs)
        self.filename = filename
        if filename:
            try:
                # the time taken to open and read the file might have
                # a little disk variability, modulo disk/kernel caching...
                f = open(filename, 'rb')
                self.add_event()
                data = f.read()
                self.add_event()
                # mix in the data from the file and wash the random pool
                self.stir(data)
                f.close()
            except IOError:
                # Oh, well; the file doesn't exist or is unreadable, so
                # we'll just ignore it.
                pass

    def save(self):
        """Write the (freshly stirred) pool out to self.filename.

        Raises ValueError when no usable filename was given to __init__.
        """
        # Bug fix: the old guard was `self.filename == ""`, which let the
        # default of None slip through to open() and fail with a
        # confusing TypeError.  Treat any false value (None or "") as
        # "no filename set".
        if not self.filename:
            raise ValueError("No filename set for this object")
        # wash the random pool before save, provides some forward secrecy for
        # old values of the pool.
        self.stir_n()
        f = open(self.filename, 'wb')
        self.add_event()
        f.write(self._randpool.tostring())
        f.close()
        self.add_event()
        # wash the pool again, provide some protection for future values
        self.stir()
# non-echoing Windows keyboard entry
# Platform probing: try the Windows console API first, then POSIX
# termios; _kb records whether a KeyboardEntry class was installed.
# The bare excepts are deliberate -- any failure just means "this
# platform's mechanism isn't available, try the next one".
_kb = 0
if not _kb:
    try:
        import msvcrt
        class KeyboardEntry:
            # Reads single keystrokes from the Windows console without echo.
            def getch(self):
                c = msvcrt.getch()
                if c in ('\000', '\xe0'):
                    # function key
                    c += msvcrt.getch()
                return c
            def close(self, delay = 0):
                # Optionally wait, then drain any pending keystrokes.
                if delay:
                    time.sleep(delay)
                while msvcrt.kbhit():
                    msvcrt.getch()
        _kb = 1
    except:
        pass

# non-echoing Posix keyboard entry
if not _kb:
    try:
        import termios
        class KeyboardEntry:
            # Puts the terminal into non-canonical, no-echo mode and
            # restores the saved settings on close().
            def __init__(self, fd = 0):
                self._fd = fd
                self._old = termios.tcgetattr(fd)
                new = termios.tcgetattr(fd)
                new[3]=new[3] & ~termios.ICANON & ~termios.ECHO
                termios.tcsetattr(fd, termios.TCSANOW, new)
            def getch(self):
                termios.tcflush(0, termios.TCIFLUSH) # XXX Leave this in?
                return os.read(self._fd, 1)
            def close(self, delay = 0):
                if delay:
                    time.sleep(delay)
                termios.tcflush(self._fd, termios.TCIFLUSH)
                termios.tcsetattr(self._fd, termios.TCSAFLUSH, self._old)
        _kb = 1
    except:
        pass
class KeyboardRandomPool (PersistentRandomPool):
    """PersistentRandomPool that gathers entropy interactively from
    keystroke contents and timing instead of an OS random source."""

    def __init__(self, *args, **kwargs):
        PersistentRandomPool.__init__(self, *args, **kwargs)

    def randomize(self, N = 0):
        "Adds N bits of entropy to random pool.  If N is 0, fill up pool."
        import os, string, time
        if N <= 0:
            bits = self.bits - self.entropy
        else:
            bits = N*8
        if bits == 0:
            return
        print bits,'bits of entropy are now required.  Please type on the keyboard'
        print 'until enough randomness has been accumulated.'
        kb = KeyboardEntry()
        s=''    # We'll save the characters typed and add them to the pool.
        hash = self._hash
        e = 0
        try:
            while e < bits:
                # Show a right-justified countdown of the bits still needed.
                temp=str(bits-e).rjust(6)
                os.write(1, temp)
                s=s+kb.getch()
                e += self.add_event(s)
                os.write(1, 6*chr(8))   # backspace over the countdown
            self.add_event(s+hash.new(s).digest() )
        finally:
            kb.close()
        print '\n\007 Enough.  Please wait a moment.\n'
        self.stir_n()           # wash the random pool.
        kb.close(4)
if __name__ == '__main__':
    # Smoke test: exercise the plain, keyboard and persistent pools.
    pool = RandomPool()
    print 'random pool entropy', pool.entropy, 'bits'
    pool.add_event('something')
    print `pool.get_bytes(100)`
    import tempfile, os
    # NOTE(review): tempfile.mktemp() is race-prone; acceptable only
    # because this is a manual smoke test.
    fname = tempfile.mktemp()
    pool = KeyboardRandomPool(filename=fname)
    print 'keyboard random pool entropy', pool.entropy, 'bits'
    pool.randomize()
    print 'keyboard random pool entropy', pool.entropy, 'bits'
    pool.randomize(128)
    pool.save()
    saved = open(fname, 'rb').read()
    print 'saved', `saved`
    print 'pool ', `pool._randpool.tostring()`
    newpool = PersistentRandomPool(fname)
    print 'persistent random pool entropy', pool.entropy, 'bits'
    os.remove(fname)

View File

@ -0,0 +1,453 @@
#
# test.py : Functions used for testing the modules
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: test.py,v 1.16 2004/08/13 22:24:18 akuchling Exp $"
import binascii
import string
import testdata
from Crypto.Cipher import *
def die(string):
    """Report a test failure without aborting, so one failure doesn't
    hide the rest of the suite."""
    import sys
    print '***ERROR: ', string
    # sys.exit(0) # Will default to continuing onward...
def print_timing (size, delta, verbose):
    """Report throughput for a timed run: size is in kilobytes, delta in
    seconds.  Guards against a zero delta from a too-coarse clock."""
    if verbose:
        if delta == 0:
            print 'Unable to measure time -- elapsed time too small'
        else:
            print '%.2f K/sec' % (size/delta)
def exerciseBlockCipher(cipher, verbose):
    """Run the named block-cipher module through every chaining mode,
    timing each and checking that decryption inverts encryption.
    Returns the cipher module (for test-vector checks) or None when
    the module isn't built."""
    import string, time
    try:
        # 'cipher' is a module name like 'AES'; eval() resolves it among
        # the names imported by "from Crypto.Cipher import *".
        ciph = eval(cipher)
    except NameError:
        print cipher, 'module not available'
        return None
    print cipher+ ':'
    str='1'                 # Build 128K of test data
    for i in xrange(0, 17):
        str=str+str
    # key_size == 0 means "variable key size"; pick 16 bytes.
    if ciph.key_size==0: ciph.key_size=16
    password = 'password12345678Extra text for password'[0:ciph.key_size]
    IV = 'Test IV Test IV Test IV Test'[0:ciph.block_size]

    if verbose: print ' ECB mode:',
    obj=ciph.new(password, ciph.MODE_ECB)
    if obj.block_size != ciph.block_size:
        die("Module and cipher object block_size don't match")

    # A few single-block encrypt/decrypt round trips.
    text='1234567812345678'[0:ciph.block_size]
    c=obj.encrypt(text)
    if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"')
    text='KuchlingKuchling'[0:ciph.block_size]
    c=obj.encrypt(text)
    if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"')
    text='NotTodayNotEver!'[0:ciph.block_size]
    c=obj.encrypt(text)
    if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"')

    # Timed bulk round trip over the 128K buffer.
    start=time.time()
    s=obj.encrypt(str)
    s2=obj.decrypt(s)
    end=time.time()
    if (str!=s2):
        die('Error in resulting plaintext from ECB mode')
    print_timing(256, end-start, verbose)
    del obj

    if verbose: print ' CFB mode:',
    obj1=ciph.new(password, ciph.MODE_CFB, IV)
    obj2=ciph.new(password, ciph.MODE_CFB, IV)
    start=time.time()
    ciphertext=obj1.encrypt(str[0:65536])
    plaintext=obj2.decrypt(ciphertext)
    end=time.time()
    if (plaintext!=str[0:65536]):
        die('Error in resulting plaintext from CFB mode')
    print_timing(64, end-start, verbose)
    del obj1, obj2

    if verbose: print ' CBC mode:',
    obj1=ciph.new(password, ciph.MODE_CBC, IV)
    obj2=ciph.new(password, ciph.MODE_CBC, IV)
    start=time.time()
    ciphertext=obj1.encrypt(str)
    plaintext=obj2.decrypt(ciphertext)
    end=time.time()
    if (plaintext!=str):
        die('Error in resulting plaintext from CBC mode')
    print_timing(256, end-start, verbose)
    del obj1, obj2

    if verbose: print ' PGP mode:',
    obj1=ciph.new(password, ciph.MODE_PGP, IV)
    obj2=ciph.new(password, ciph.MODE_PGP, IV)
    start=time.time()
    ciphertext=obj1.encrypt(str)
    plaintext=obj2.decrypt(ciphertext)
    end=time.time()
    if (plaintext!=str):
        die('Error in resulting plaintext from PGP mode')
    print_timing(256, end-start, verbose)
    del obj1, obj2

    if verbose: print ' OFB mode:',
    obj1=ciph.new(password, ciph.MODE_OFB, IV)
    obj2=ciph.new(password, ciph.MODE_OFB, IV)
    start=time.time()
    ciphertext=obj1.encrypt(str)
    plaintext=obj2.decrypt(ciphertext)
    end=time.time()
    if (plaintext!=str):
        die('Error in resulting plaintext from OFB mode')
    print_timing(256, end-start, verbose)
    del obj1, obj2

    def counter(length=ciph.block_size):
        # Constant counter block -- only exercises the CTR plumbing,
        # not counter progression.
        return length * 'a'

    if verbose: print ' CTR mode:',
    obj1=ciph.new(password, ciph.MODE_CTR, counter=counter)
    obj2=ciph.new(password, ciph.MODE_CTR, counter=counter)
    start=time.time()
    ciphertext=obj1.encrypt(str)
    plaintext=obj2.decrypt(ciphertext)
    end=time.time()
    if (plaintext!=str):
        die('Error in resulting plaintext from CTR mode')
    print_timing(256, end-start, verbose)
    del obj1, obj2

    # Test the IV handling
    if verbose: print ' Testing IV handling'
    obj1=ciph.new(password, ciph.MODE_CBC, IV)
    plaintext='Test'*(ciph.block_size/4)*3
    ciphertext1=obj1.encrypt(plaintext)
    # Resetting the IV mid-stream must restart the chain identically.
    obj1.IV=IV
    ciphertext2=obj1.encrypt(plaintext)
    if ciphertext1!=ciphertext2:
        die('Error in setting IV')

    # Test keyword arguments
    obj1=ciph.new(key=password)
    obj1=ciph.new(password, mode=ciph.MODE_CBC)
    obj1=ciph.new(mode=ciph.MODE_CBC, key=password)
    obj1=ciph.new(IV=IV, mode=ciph.MODE_CBC, key=password)

    return ciph
def exerciseStreamCipher(cipher, verbose):
    """Run the named stream-cipher module through encrypt/decrypt round
    trips and a timed 128K bulk pass.  Returns the cipher module, or
    None when it isn't built."""
    import string, time
    try:
        # Resolve the module name among "from Crypto.Cipher import *" names.
        ciph = eval(cipher)
    except (NameError):
        print cipher, 'module not available'
        return None
    print cipher + ':',
    str='1'                 # Build 128K of test data
    for i in xrange(0, 17):
        str=str+str
    # key_size == 0 means "variable key size"; pick 16 bytes.
    key_size = ciph.key_size or 16
    password = 'password12345678Extra text for password'[0:key_size]

    obj1=ciph.new(password)
    obj2=ciph.new(password)
    if obj1.block_size != ciph.block_size:
        die("Module and cipher object block_size don't match")
    if obj1.key_size != ciph.key_size:
        die("Module and cipher object key_size don't match")

    # Round trips through a pair of keystream-synchronized objects.
    text='1234567812345678Python'
    c=obj1.encrypt(text)
    if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"')
    text='B1FF I2 A R3A11Y |<00L D00D!!!!!'
    c=obj1.encrypt(text)
    if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"')
    text='SpamSpamSpamSpamSpamSpamSpamSpamSpam'
    c=obj1.encrypt(text)
    if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"')

    # Timed bulk round trip over the 128K buffer.
    start=time.time()
    s=obj1.encrypt(str)
    str=obj2.decrypt(s)
    end=time.time()
    print_timing(256, end-start, verbose)
    del obj1, obj2

    return ciph
def TestStreamModules(args=['arc4', 'XOR'], verbose=1):
    """Exercise the requested stream ciphers and verify them against
    the known-answer vectors in the testdata module.

    NOTE(review): the mutable default 'args' is never modified here,
    so sharing it between calls is harmless.
    """
    import sys, string
    args=map(string.lower, args)

    if 'arc4' in args:
        # Test ARC4 stream cipher
        arc4=exerciseStreamCipher('ARC4', verbose)
        if (arc4!=None):
            for entry in testdata.arc4:
                key,plain,cipher=entry
                key=binascii.a2b_hex(key)
                plain=binascii.a2b_hex(plain)
                cipher=binascii.a2b_hex(cipher)
                obj=arc4.new(key)
                ciphertext=obj.encrypt(plain)
                if (ciphertext!=cipher):
                    die('ARC4 failed on entry '+`entry`)

    if 'xor' in args:
        # Test XOR stream cipher
        XOR=exerciseStreamCipher('XOR', verbose)
        if (XOR!=None):
            for entry in testdata.xor:
                key,plain,cipher=entry
                key=binascii.a2b_hex(key)
                plain=binascii.a2b_hex(plain)
                cipher=binascii.a2b_hex(cipher)
                obj=XOR.new(key)
                ciphertext=obj.encrypt(plain)
                if (ciphertext!=cipher):
                    die('XOR failed on entry '+`entry`)
def TestBlockModules(args=['aes', 'arc2', 'des', 'blowfish', 'cast', 'des3',
'idea', 'rc5'],
verbose=1):
import string
args=map(string.lower, args)
if 'aes' in args:
ciph=exerciseBlockCipher('AES', verbose) # AES
if (ciph!=None):
if verbose: print ' Verifying against test suite...'
for entry in testdata.aes:
key,plain,cipher=entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=ciph.new(key, ciph.MODE_ECB)
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('AES failed on entry '+`entry`)
for i in ciphertext:
if verbose: print hex(ord(i)),
if verbose: print
for entry in testdata.aes_modes:
mode, key, plain, cipher, kw = entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=ciph.new(key, mode, **kw)
obj2=ciph.new(key, mode, **kw)
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('AES encrypt failed on entry '+`entry`)
for i in ciphertext:
if verbose: print hex(ord(i)),
if verbose: print
plain2=obj2.decrypt(ciphertext)
if plain2!=plain:
die('AES decrypt failed on entry '+`entry`)
for i in plain2:
if verbose: print hex(ord(i)),
if verbose: print
if 'arc2' in args:
ciph=exerciseBlockCipher('ARC2', verbose) # Alleged RC2
if (ciph!=None):
if verbose: print ' Verifying against test suite...'
for entry in testdata.arc2:
key,plain,cipher=entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=ciph.new(key, ciph.MODE_ECB)
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('ARC2 failed on entry '+`entry`)
for i in ciphertext:
if verbose: print hex(ord(i)),
print
if 'blowfish' in args:
ciph=exerciseBlockCipher('Blowfish',verbose)# Bruce Schneier's Blowfish cipher
if (ciph!=None):
if verbose: print ' Verifying against test suite...'
for entry in testdata.blowfish:
key,plain,cipher=entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=ciph.new(key, ciph.MODE_ECB)
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('Blowfish failed on entry '+`entry`)
for i in ciphertext:
if verbose: print hex(ord(i)),
if verbose: print
if 'cast' in args:
ciph=exerciseBlockCipher('CAST', verbose) # CAST-128
if (ciph!=None):
if verbose: print ' Verifying against test suite...'
for entry in testdata.cast:
key,plain,cipher=entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=ciph.new(key, ciph.MODE_ECB)
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('CAST failed on entry '+`entry`)
for i in ciphertext:
if verbose: print hex(ord(i)),
if verbose: print
if 0:
# The full-maintenance test; it requires 4 million encryptions,
# and correspondingly is quite time-consuming. I've disabled
# it; it's faster to compile block/cast.c with -DTEST and run
# the resulting program.
a = b = '\x01\x23\x45\x67\x12\x34\x56\x78\x23\x45\x67\x89\x34\x56\x78\x9A'
for i in range(0, 1000000):
obj = cast.new(b, cast.MODE_ECB)
a = obj.encrypt(a[:8]) + obj.encrypt(a[-8:])
obj = cast.new(a, cast.MODE_ECB)
b = obj.encrypt(b[:8]) + obj.encrypt(b[-8:])
if a!="\xEE\xA9\xD0\xA2\x49\xFD\x3B\xA6\xB3\x43\x6F\xB8\x9D\x6D\xCA\x92":
if verbose: print 'CAST test failed: value of "a" doesn\'t match'
if b!="\xB2\xC9\x5E\xB0\x0C\x31\xAD\x71\x80\xAC\x05\xB8\xE8\x3D\x69\x6E":
if verbose: print 'CAST test failed: value of "b" doesn\'t match'
if 'des' in args:
# Test/benchmark DES block cipher
des=exerciseBlockCipher('DES', verbose)
if (des!=None):
# Various tests taken from the DES library packaged with Kerberos V4
obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_ECB)
s=obj.encrypt('Now is t')
if (s!=binascii.a2b_hex('3fa40e8a984d4815')):
die('DES fails test 1')
obj=des.new(binascii.a2b_hex('08192a3b4c5d6e7f'), des.MODE_ECB)
s=obj.encrypt('\000\000\000\000\000\000\000\000')
if (s!=binascii.a2b_hex('25ddac3e96176467')):
die('DES fails test 2')
obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC,
binascii.a2b_hex('1234567890abcdef'))
s=obj.encrypt("Now is the time for all ")
if (s!=binascii.a2b_hex('e5c7cdde872bf27c43e934008c389c0f683788499a7c05f6')):
die('DES fails test 3')
obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC,
binascii.a2b_hex('fedcba9876543210'))
s=obj.encrypt("7654321 Now is the time for \000\000\000\000")
if (s!=binascii.a2b_hex("ccd173ffab2039f4acd8aefddfd8a1eb468e91157888ba681d269397f7fe62b4")):
die('DES fails test 4')
del obj,s
# R. Rivest's test: see http://theory.lcs.mit.edu/~rivest/destest.txt
x=binascii.a2b_hex('9474B8E8C73BCA7D')
for i in range(0, 16):
obj=des.new(x, des.MODE_ECB)
if (i & 1): x=obj.decrypt(x)
else: x=obj.encrypt(x)
if x!=binascii.a2b_hex('1B1A2DDB4C642438'):
die("DES fails Rivest's test")
if verbose: print ' Verifying against test suite...'
for entry in testdata.des:
key,plain,cipher=entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=des.new(key, des.MODE_ECB)
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('DES failed on entry '+`entry`)
for entry in testdata.des_cbc:
key, iv, plain, cipher=entry
key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher)
obj1=des.new(key, des.MODE_CBC, iv)
obj2=des.new(key, des.MODE_CBC, iv)
ciphertext=obj1.encrypt(plain)
if (ciphertext!=cipher):
die('DES CBC mode failed on entry '+`entry`)
if 'des3' in args:
ciph=exerciseBlockCipher('DES3', verbose) # Triple DES
if (ciph!=None):
if verbose: print ' Verifying against test suite...'
for entry in testdata.des3:
key,plain,cipher=entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=ciph.new(key, ciph.MODE_ECB)
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('DES3 failed on entry '+`entry`)
for i in ciphertext:
if verbose: print hex(ord(i)),
if verbose: print
for entry in testdata.des3_cbc:
key, iv, plain, cipher=entry
key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher)
obj1=ciph.new(key, ciph.MODE_CBC, iv)
obj2=ciph.new(key, ciph.MODE_CBC, iv)
ciphertext=obj1.encrypt(plain)
if (ciphertext!=cipher):
die('DES3 CBC mode failed on entry '+`entry`)
if 'idea' in args:
ciph=exerciseBlockCipher('IDEA', verbose) # IDEA block cipher
if (ciph!=None):
if verbose: print ' Verifying against test suite...'
for entry in testdata.idea:
key,plain,cipher=entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=ciph.new(key, ciph.MODE_ECB)
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('IDEA failed on entry '+`entry`)
if 'rc5' in args:
# Ronald Rivest's RC5 algorithm
ciph=exerciseBlockCipher('RC5', verbose)
if (ciph!=None):
if verbose: print ' Verifying against test suite...'
for entry in testdata.rc5:
key,plain,cipher=entry
key=binascii.a2b_hex(key)
plain=binascii.a2b_hex(plain)
cipher=binascii.a2b_hex(cipher)
obj=ciph.new(key[4:], ciph.MODE_ECB,
version =ord(key[0]),
word_size=ord(key[1]),
rounds =ord(key[2]) )
ciphertext=obj.encrypt(plain)
if (ciphertext!=cipher):
die('RC5 failed on entry '+`entry`)
for i in ciphertext:
if verbose: print hex(ord(i)),
if verbose: print

View File

@ -0,0 +1,25 @@
"""Python Cryptography Toolkit
A collection of cryptographic modules implementing various algorithms
and protocols.
Subpackages:
Crypto.Cipher Secret-key encryption algorithms (AES, DES, ARC4)
Crypto.Hash Hashing algorithms (MD5, SHA, HMAC)
Crypto.Protocol Cryptographic protocols (Chaffing, all-or-nothing
transform). This package does not contain any
network protocols.
Crypto.PublicKey Public-key encryption and signature algorithms
(RSA, DSA)
Crypto.Util Various useful modules and functions (long-to-string
conversion, random number generation, number
theoretic functions)
"""
__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util']
__version__ = '2.0.1'
__revision__ = "$Id: __init__.py,v 1.12 2005/06/14 01:20:22 akuchling Exp $"

View File

@ -0,0 +1,38 @@
#
# Test script for the Python Cryptography Toolkit.
#
# Runs the stream- and block-cipher test suites from Crypto.Util.test,
# optionally restricted to the ciphers named on the command line.
# Pass --quiet to suppress banners and per-cipher progress output.
#
__revision__ = "$Id: test.py,v 1.7 2002/07/11 14:31:19 akuchling Exp $"
import os, sys
# Add the build directory to the front of sys.path
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
s = os.path.join(os.getcwd(), s)
sys.path.insert(0, s)
# Also make the test/ directory importable (it provides the test vectors).
s = os.path.join(os.getcwd(), 'test')
sys.path.insert(0, s)
from Crypto.Util import test
args = sys.argv[1:]
quiet = "--quiet" in args
if quiet: args.remove('--quiet')
if not quiet:
    print '\nStream Ciphers:'
    print '==============='
# Any remaining arguments name the specific ciphers to exercise.
if args: test.TestStreamModules(args, verbose= not quiet)
else: test.TestStreamModules(verbose= not quiet)
if not quiet:
    print '\nBlock Ciphers:'
    print '=============='
if args: test.TestBlockModules(args, verbose= not quiet)
else: test.TestBlockModules(verbose= not quiet)

835
python/gdata/__init__.py Normal file
View File

@ -0,0 +1,835 @@
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Google Data elements.
Extends Atom classes to add Google Data specific elements.
"""
__author__ = 'j.s@google.com (Jeffrey Scudder)'
import os
import atom
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
# XML namespaces which are often used in GData entities.
GDATA_NAMESPACE = 'http://schemas.google.com/g/2005'
# %s-templates produce Clark-notation tags: '{namespace}localname'.
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'
GACL_NAMESPACE = 'http://schemas.google.com/acl/2007'
GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'
class Error(Exception):
  """Base exception class for errors raised by this gdata module."""
  pass
class MissingRequiredParameters(Error):
  """Raised when a call omits parameters it cannot proceed without
  (e.g. BatchFeed.AddBatchEntry with neither an entry nor a URL)."""
  pass
class MediaSource(object):
  """Holds a reference to a media file plus the metadata GData needs.

  GData Entries can refer to media sources; instances of this class pair
  an open file handle with its MIME type, byte length, and file name.
  """

  def __init__(self, file_handle=None, content_type=None, content_length=None,
      file_path=None, file_name=None):
    """Creates an object of type MediaSource.

    Args:
      file_handle: A file handle pointing to the file to be encapsulated in
          the MediaSource
      content_type: string The MIME type of the file. Required if a
          file_handle is given.
      content_length: int The size of the file. Required if a file_handle is
          given.
      file_path: string (optional) A full path name to the file. Used in
          place of a file_handle.
      file_name: string The name of the file without any path information.
          Required if a file_handle is given.
    """
    self.file_handle = file_handle
    self.content_type = content_type
    self.content_length = content_length
    self.file_name = file_name
    # No handle supplied but a path and MIME type were: open the file
    # ourselves and derive length/name from the path.
    given_path_instead_of_handle = (file_handle is None
                                    and content_type is not None
                                    and file_path is not None)
    if given_path_instead_of_handle:
      self.setFile(file_path, content_type)

  def setFile(self, file_name, content_type):
    """Opens the named file and fills in type, length and base name.

    A convenience helper that populates file_handle, content_type,
    content_length and file_name in one call.

    Args:
      file_name: string The path and file name to the file containing the
          media
      content_type: string A MIME type representing the type of the media
    """
    self.file_handle = open(file_name, 'rb')
    self.content_type = content_type
    self.content_length = os.path.getsize(file_name)
    self.file_name = os.path.basename(file_name)
class LinkFinder(atom.LinkFinder):
  """An "interface" providing methods to find link elements

  GData Entry elements often contain multiple links which differ in the rel
  attribute or content type. Often, developers are interested in a specific
  type of link so this class provides methods to find specific classes of
  links.
  This class is used as a mixin in GData entries.
  """

  def GetSelfLink(self):
    """Find the first link with rel set to 'self'

    Returns:
      An atom.Link or none if none of the links had rel equal to 'self'
    """
    return next((link for link in self.link if link.rel == 'self'), None)

  def GetEditLink(self):
    """Returns the first link whose rel is 'edit', or None."""
    return next((link for link in self.link if link.rel == 'edit'), None)

  def GetEditMediaLink(self):
    """The Picasa API mistakenly returns media-edit rather than edit-media, but
    this may change soon.
    """
    # Accept either spelling; the first link matching either wins.
    matches = (link for link in self.link
               if link.rel in ('edit-media', 'media-edit'))
    return next(matches, None)

  def GetHtmlLink(self):
    """Find the first link with rel of alternate and type of text/html

    Returns:
      An atom.Link or None if no links matched
    """
    matches = (link for link in self.link
               if link.rel == 'alternate' and link.type == 'text/html')
    return next(matches, None)

  def GetPostLink(self):
    """Get a link containing the POST target URL.

    The POST target URL is used to insert new entries.

    Returns:
      A link object with a rel matching the POST type.
    """
    matches = (link for link in self.link
               if link.rel == 'http://schemas.google.com/g/2005#post')
    return next(matches, None)

  def GetAclLink(self):
    """Returns the access-control-list link, or None."""
    matches = (
        link for link in self.link
        if link.rel == 'http://schemas.google.com/acl/2007#accessControlList')
    return next(matches, None)

  def GetFeedLink(self):
    """Returns the first link whose rel is the GData feed rel, or None."""
    matches = (link for link in self.link
               if link.rel == 'http://schemas.google.com/g/2005#feed')
    return next(matches, None)

  def GetNextLink(self):
    """Returns the pagination link with rel 'next', or None."""
    return next((link for link in self.link if link.rel == 'next'), None)

  def GetPrevLink(self):
    """Returns the pagination link with rel 'previous', or None."""
    return next((link for link in self.link if link.rel == 'previous'), None)
class TotalResults(atom.AtomBase):
  """opensearch:TotalResults for a GData feed"""
  _tag = 'totalResults'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  def __init__(self, extension_elements=None,
      extension_attributes=None, text=None):
    """Constructor.

    Args:
      extension_elements: list (optional) Child ExtensionElement instances.
      extension_attributes: dict (optional) Additional XML attribute values.
      text: str (optional) The element's text node — the total result count.
    """
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def TotalResultsFromString(xml_string):
  """Deserializes a TotalResults instance from its XML string form."""
  return atom.CreateClassFromXMLString(TotalResults, xml_string)
class StartIndex(atom.AtomBase):
  """The opensearch:startIndex element in GData feed"""
  _tag = 'startIndex'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  def __init__(self, extension_elements=None,
      extension_attributes=None, text=None):
    """Constructor.

    Args:
      extension_elements: list (optional) Child ExtensionElement instances.
      extension_attributes: dict (optional) Additional XML attribute values.
      text: str (optional) The element's text node — the 1-based start index.
    """
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def StartIndexFromString(xml_string):
  """Deserializes a StartIndex instance from its XML string form."""
  return atom.CreateClassFromXMLString(StartIndex, xml_string)
class ItemsPerPage(atom.AtomBase):
  """The opensearch:itemsPerPage element in GData feed"""
  _tag = 'itemsPerPage'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  def __init__(self, extension_elements=None,
      extension_attributes=None, text=None):
    """Constructor.

    Args:
      extension_elements: list (optional) Child ExtensionElement instances.
      extension_attributes: dict (optional) Additional XML attribute values.
      text: str (optional) The element's text node — the page size.
    """
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def ItemsPerPageFromString(xml_string):
  """Deserializes an ItemsPerPage instance from its XML string form."""
  return atom.CreateClassFromXMLString(ItemsPerPage, xml_string)
class ExtendedProperty(atom.AtomBase):
  """The Google Data extendedProperty element.

  Used to store arbitrary key-value information specific to your
  application. The value can either be a text string stored as an XML
  attribute (.value), or an XML node (XmlBlob) as a child element.
  This element is used in the Google Calendar data API and the Google
  Contacts data API.
  """
  _tag = 'extendedProperty'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # XML attribute name -> Python member name.
  _attributes['name'] = 'name'
  _attributes['value'] = 'value'
  def __init__(self, name=None, value=None, extension_elements=None,
      extension_attributes=None, text=None):
    """Constructor.

    Args:
      name: str (optional) The property's key.
      value: str (optional) The property's value when stored as an XML
          attribute; mutually exclusive in practice with an XML blob child.
      extension_elements: list (optional) Child ExtensionElement instances.
      extension_attributes: dict (optional) Additional XML attribute values.
      text: str (optional) The element's text node.
    """
    self.name = name
    self.value = value
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
  def GetXmlBlobExtensionElement(self):
    """Returns the XML blob as an atom.ExtensionElement.

    Returns:
      An atom.ExtensionElement representing the blob's XML, or None if no
      blob was set.
    """
    # The blob, when present, is the single (first) extension element.
    if len(self.extension_elements) < 1:
      return None
    else:
      return self.extension_elements[0]
  def GetXmlBlobString(self):
    """Returns the XML blob as a string.

    Returns:
      A string containing the blob's XML, or None if no blob was set.
    """
    blob = self.GetXmlBlobExtensionElement()
    if blob:
      return blob.ToString()
    return None
  def SetXmlBlob(self, blob):
    """Sets the contents of the extendedProperty to XML as a child node.

    Since the extendedProperty is only allowed one child element as an XML
    blob, setting the XML blob will erase any preexisting extension elements
    in this object.

    Args:
      blob: str, ElementTree Element or atom.ExtensionElement representing
          the XML blob stored in the extendedProperty.
    """
    # Erase any existing extension_elements, clears the child nodes from the
    # extendedProperty.
    self.extension_elements = []
    # Accept the blob in any of its three supported representations.
    if isinstance(blob, atom.ExtensionElement):
      self.extension_elements.append(blob)
    elif ElementTree.iselement(blob):
      self.extension_elements.append(atom._ExtensionElementFromElementTree(
          blob))
    else:
      self.extension_elements.append(atom.ExtensionElementFromString(blob))
def ExtendedPropertyFromString(xml_string):
  """Deserializes an ExtendedProperty instance from its XML string form."""
  return atom.CreateClassFromXMLString(ExtendedProperty, xml_string)
class GDataEntry(atom.Entry, LinkFinder):
  """Extends Atom Entry to provide data processing"""
  _tag = atom.Entry._tag
  _namespace = atom.Entry._namespace
  _children = atom.Entry._children.copy()
  _attributes = atom.Entry._attributes.copy()
  def __GetId(self):
    return self.__id
  # This method was created to strip the unwanted whitespace from the id's
  # text node.
  def __SetId(self, id):
    self.__id = id
    if id is not None and id.text is not None:
      self.__id.text = id.text.strip()
  # Property wrapping the private __id so assignments go through __SetId.
  id = property(__GetId, __SetId)
  def IsMedia(self):
    """Determines whether or not an entry is a GData Media entry.

    Returns:
      True if the entry carries an edit-media (or media-edit) link,
      False otherwise.
    """
    if (self.GetEditMediaLink()):
      return True
    else:
      return False
  def GetMediaURL(self):
    """Returns the URL to the media content, if the entry is a media entry.
    Otherwise returns None.
    """
    if not self.IsMedia():
      return None
    else:
      # NOTE(review): assumes self.content is set with a src attribute on
      # media entries — confirm against the atom.Entry parsing rules.
      return self.content.src
def GDataEntryFromString(xml_string):
  """Creates a new GDataEntry instance given a string of XML."""
  return atom.CreateClassFromXMLString(GDataEntry, xml_string)
class GDataFeed(atom.Feed, LinkFinder):
  """A Feed from a GData service"""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = atom.Feed._children.copy()
  _attributes = atom.Feed._attributes.copy()
  # OpenSearch pagination metadata parsed into dedicated members.
  _children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results',
      TotalResults)
  _children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index',
      StartIndex)
  _children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page',
      ItemsPerPage)
  # Add a conversion rule for atom:entry to make it into a GData
  # Entry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry])

  def __GetId(self):
    return self.__id

  # Strips the unwanted whitespace from the id's text node (mirrors the
  # id property on GDataEntry).
  def __SetId(self, id):
    self.__id = id
    if id is not None and id.text is not None:
      self.__id.text = id.text.strip()
  id = property(__GetId, __SetId)

  def __GetGenerator(self):
    return self.__generator

  def __SetGenerator(self, generator):
    self.__generator = generator
    # Fix: also require a non-None text node before stripping, exactly as
    # __SetId does above; a generator element without text used to raise
    # AttributeError (None.strip()).
    if generator is not None and generator.text is not None:
      self.__generator.text = generator.text.strip()
  generator = property(__GetGenerator, __SetGenerator)

  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, entry=None,
      total_results=None, start_index=None, items_per_page=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor for Source

    Args:
      author: list (optional) A list of Author instances which belong to this
          class.
      category: list (optional) A list of Category instances
      contributor: list (optional) A list on Contributor instances
      generator: Generator (optional)
      icon: Icon (optional)
      id: Id (optional) The entry's Id element
      link: list (optional) A list of Link instances
      logo: Logo (optional)
      rights: Rights (optional) The entry's Rights element
      subtitle: Subtitle (optional) The entry's subtitle element
      title: Title (optional) the entry's title element
      updated: Updated (optional) the entry's updated element
      entry: list (optional) A list of the Entry instances contained in the
          feed.
      total_results: TotalResults (optional) The opensearch:totalResults
          element for the feed.
      start_index: StartIndex (optional) The opensearch:startIndex element.
      items_per_page: ItemsPerPage (optional) The opensearch:itemsPerPage
          element.
      text: String (optional) The text contents of the element. This is the
          contents of the Entry's XML text node.
          (Example: <foo>This is the text</foo>)
      extension_elements: list (optional) A list of ExtensionElement instances
          which are children of this element.
      extension_attributes: dict (optional) A dictionary of strings which are
          the values for additional XML attributes of this element.
    """
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.generator = generator
    self.icon = icon
    self.id = atom_id
    self.link = link or []
    self.logo = logo
    self.rights = rights
    self.subtitle = subtitle
    self.title = title
    self.updated = updated
    self.entry = entry or []
    self.total_results = total_results
    self.start_index = start_index
    self.items_per_page = items_per_page
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def GDataFeedFromString(xml_string):
  """Deserializes a GDataFeed instance from its XML string form."""
  return atom.CreateClassFromXMLString(GDataFeed, xml_string)
class BatchId(atom.AtomBase):
  """The batch:id element; identifies one operation within a batch feed."""
  _tag = 'id'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
def BatchIdFromString(xml_string):
  """Deserializes a BatchId instance from its XML string form."""
  return atom.CreateClassFromXMLString(BatchId, xml_string)
class BatchOperation(atom.AtomBase):
  """The batch:operation element; names the CRUD action for a batch entry."""
  _tag = 'operation'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # The 'type' XML attribute holds the operation name (insert/update/...).
  _attributes['type'] = 'type'
  def __init__(self, op_type=None, extension_elements=None,
      extension_attributes=None,
      text=None):
    """Constructor.

    Args:
      op_type: str (optional) One of the BATCH_* operation labels
          ('insert', 'update', 'delete', 'query').
      extension_elements: list (optional) Child ExtensionElement instances.
      extension_attributes: dict (optional) Additional XML attribute values.
      text: str (optional) The element's text node.
    """
    self.type = op_type
    atom.AtomBase.__init__(self,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes,
        text=text)
def BatchOperationFromString(xml_string):
  """Deserializes a BatchOperation instance from its XML string form."""
  return atom.CreateClassFromXMLString(BatchOperation, xml_string)
class BatchStatus(atom.AtomBase):
  """The batch:status element present in a batch response entry.

  A status element contains the code (HTTP response code) and
  reason as elements. In a single request these fields would
  be part of the HTTP response, but in a batch request each
  Entry operation has a corresponding Entry in the response
  feed which includes status information.

  See http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _tag = 'status'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['code'] = 'code'
  _attributes['reason'] = 'reason'
  # Hyphenated XML attribute maps to the underscored Python member.
  _attributes['content-type'] = 'content_type'
  def __init__(self, code=None, reason=None, content_type=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor.

    Args:
      code: str (optional) The HTTP-style status code for the operation.
      reason: str (optional) Human-readable explanation of the status.
      content_type: str (optional) MIME type of the response body.
      extension_elements: list (optional) Child ExtensionElement instances.
      extension_attributes: dict (optional) Additional XML attribute values.
      text: str (optional) The element's text node.
    """
    self.code = code
    self.reason = reason
    self.content_type = content_type
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
        extension_attributes=extension_attributes,
        text=text)
def BatchStatusFromString(xml_string):
  """Deserializes a BatchStatus instance from its XML string form."""
  return atom.CreateClassFromXMLString(BatchStatus, xml_string)
class BatchEntry(GDataEntry):
  """An atom:entry for use in batch requests.

  The BatchEntry contains additional members to specify the operation to be
  performed on this entry and a batch ID so that the server can reference
  individual operations in the response feed. For more information, see:
  http://code.google.com/apis/gdata/batch.html
  """
  _tag = GDataEntry._tag
  _namespace = GDataEntry._namespace
  _children = GDataEntry._children.copy()
  # batch:* child elements parsed into dedicated members.
  _children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation)
  _children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId)
  _children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus)
  _attributes = GDataEntry._attributes.copy()
  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      batch_operation=None, batch_id=None, batch_status=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor; same as GDataEntry plus the three batch members.

    Args:
      batch_operation: BatchOperation (optional) The CRUD action to perform.
      batch_id: BatchId (optional) Client-chosen ID echoed in the response.
      batch_status: BatchStatus (optional) Per-operation result status
          (set by the server in response feeds).
      (remaining args: see GDataEntry/atom.Entry)
    """
    self.batch_operation = batch_operation
    self.batch_id = batch_id
    self.batch_status = batch_status
    GDataEntry.__init__(self, author=author, category=category,
        content=content, contributor=contributor, atom_id=atom_id, link=link,
        published=published, rights=rights, source=source, summary=summary,
        control=control, title=title, updated=updated,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def BatchEntryFromString(xml_string):
  """Deserializes a BatchEntry instance from its XML string form."""
  return atom.CreateClassFromXMLString(BatchEntry, xml_string)
class BatchInterrupted(atom.AtomBase):
  """The batch:interrupted element sent if batch request was interrupted.

  Only appears in a feed if some of the batch entries could not be processed.
  See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _tag = 'interrupted'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['reason'] = 'reason'
  _attributes['success'] = 'success'
  _attributes['failures'] = 'failures'
  _attributes['parsed'] = 'parsed'
  def __init__(self, reason=None, success=None, failures=None, parsed=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor.

    Args:
      reason: str (optional) Why processing was interrupted.
      success: str (optional) Count of entries processed successfully.
      failures: str (optional) Count of entries that failed.
      parsed: str (optional) Count of entries parsed before the interruption.
      extension_elements: list (optional) Child ExtensionElement instances.
      extension_attributes: dict (optional) Additional XML attribute values.
      text: str (optional) The element's text node.
    """
    self.reason = reason
    self.success = success
    self.failures = failures
    self.parsed = parsed
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
        extension_attributes=extension_attributes,
        text=text)
def BatchInterruptedFromString(xml_string):
  """Deserializes a BatchInterrupted instance from its XML string form."""
  return atom.CreateClassFromXMLString(BatchInterrupted, xml_string)
class BatchFeed(GDataFeed):
"""A feed containing a list of batch request entries."""
_tag = GDataFeed._tag
_namespace = GDataFeed._namespace
_children = GDataFeed._children.copy()
_attributes = GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry])
_children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted', BatchInterrupted)
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None, entry=None,
total_results=None, start_index=None, items_per_page=None,
interrupted=None,
extension_elements=None, extension_attributes=None, text=None):
self.interrupted = interrupted
GDataFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results, start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def AddBatchEntry(self, entry=None, id_url_string=None,
batch_id_string=None, operation_string=None):
"""Logic for populating members of a BatchEntry and adding to the feed.
If the entry is not a BatchEntry, it is converted to a BatchEntry so
that the batch specific members will be present.
The id_url_string can be used in place of an entry if the batch operation
applies to a URL. For example query and delete operations require just
the URL of an entry, no body is sent in the HTTP request. If an
id_url_string is sent instead of an entry, a BatchEntry is created and
added to the feed.
This method also assigns the desired batch id to the entry so that it
can be referenced in the server's response. If the batch_id_string is
None, this method will assign a batch_id to be the index at which this
entry will be in the feed's entry list.
Args:
entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
entry which will be sent to the server as part of the batch request.
The item must have a valid atom id so that the server knows which
entry this request references.
id_url_string: str (optional) The URL of the entry to be acted on. You
can find this URL in the text member of the atom id for an entry.
If an entry is not sent, this id will be used to construct a new
BatchEntry which will be added to the request feed.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. Note that batch_ids should either always be specified or
never, mixing could potentially result in duplicate batch ids.
operation_string: str (optional) The desired batch operation which will
set the batch_operation.type member of the entry. Options are
'insert', 'update', 'delete', and 'query'
Raises:
MissingRequiredParameters: Raised if neither an id_ url_string nor an
entry are provided in the request.
Returns:
The added entry.
"""
if entry is None and id_url_string is None:
raise MissingRequiredParameters('supply either an entry or URL string')
if entry is None and id_url_string is not None:
entry = BatchEntry(atom_id=atom.Id(text=id_url_string))
# TODO: handle cases in which the entry lacks batch_... members.
#if not isinstance(entry, BatchEntry):
# Convert the entry to a batch entry.
if batch_id_string is not None:
entry.batch_id = BatchId(text=batch_id_string)
elif entry.batch_id is None or entry.batch_id.text is None:
entry.batch_id = BatchId(text=str(len(self.entry)))
if operation_string is not None:
entry.batch_operation = BatchOperation(op_type=operation_string)
self.entry.append(entry)
return entry
def AddInsert(self, entry, batch_id_string=None):
"""Add an insert request to the operations in this batch request feed.
If the entry doesn't yet have an operation or a batch id, these will
be set to the insert operation and a batch_id specified as a parameter.
Args:
entry: BatchEntry The entry which will be sent in the batch feed as an
insert request.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. Note that batch_ids should either always be specified or
never, mixing could potentially result in duplicate batch ids.
"""
entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
operation_string=BATCH_INSERT)
def AddUpdate(self, entry, batch_id_string=None):
"""Add an update request to the list of batch operations in this feed.
Sets the operation type of the entry to insert if it is not already set
and assigns the desired batch id to the entry so that it can be
referenced in the server's response.
Args:
entry: BatchEntry The entry which will be sent to the server as an
update (HTTP PUT) request. The item must have a valid atom id
so that the server knows which entry to replace.
batch_id_string: str (optional) The batch ID to be used to reference
this batch operation in the results feed. If this parameter is None,
the current length of the feed's entry array will be used as a
count. See also comments for AddInsert.
"""
entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
operation_string=BATCH_UPDATE)
def AddDelete(self, url_string=None, entry=None, batch_id_string=None):
  """Queue a delete operation in this batch feed.

  Supply either the atom id URL of the item to remove, or the entry
  itself; the entry's atom id tells the server what to delete.

  Args:
    url_string: str (optional) URL of the entry to delete (found in the
        text member of the entry's atom id).
    entry: BatchEntry (optional) The entry to be deleted.
    batch_id_string: str (optional) Identifier for this operation in the
        response feed.

  Raises:
    MissingRequiredParameters: If neither url_string nor entry is
        provided.
  """
  self.AddBatchEntry(entry=entry,
                     id_url_string=url_string,
                     batch_id_string=batch_id_string,
                     operation_string=BATCH_DELETE)
def AddQuery(self, url_string=None, entry=None, batch_id_string=None):
  """Queue a query operation in this batch feed.

  Pass either the query URL itself, or a BatchEntry already wrapping a
  query URL; the query's results are added to the response feed.

  Args:
    url_string: str (optional) The query URL.
    entry: BatchEntry (optional) An entry encapsulating the query URL.
    batch_id_string: str (optional) Identifier for this operation in the
        response feed.

  Raises:
    MissingRequiredParameters: If neither url_string nor entry is
        provided.
  """
  self.AddBatchEntry(entry=entry,
                     id_url_string=url_string,
                     batch_id_string=batch_id_string,
                     operation_string=BATCH_QUERY)
def GetBatchLink(self):
  """Return this feed's link with the batch rel, or None if absent."""
  batch_rel = 'http://schemas.google.com/g/2005#batch'
  for candidate in self.link:
    if candidate.rel == batch_rel:
      return candidate
  return None
def BatchFeedFromString(xml_string):
  """Deserialize an XML string into a BatchFeed instance."""
  parsed = atom.CreateClassFromXMLString(BatchFeed, xml_string)
  return parsed
class EntryLink(atom.AtomBase):
  """The gd:entryLink element, which embeds a full entry inline."""
  _tag = 'entryLink'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # The embedded entry used to be an atom.Entry; it is now a GDataEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry)
  _attributes.update({'rel': 'rel', 'readOnly': 'read_only', 'href': 'href'})

  def __init__(self, href=None, read_only=None, rel=None,
               entry=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Create an entryLink, optionally wrapping an inline entry."""
    self.href = href
    self.read_only = read_only
    self.rel = rel
    self.entry = entry
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def EntryLinkFromString(xml_string):
  """Deserialize an XML string into an EntryLink element."""
  parsed = atom.CreateClassFromXMLString(EntryLink, xml_string)
  return parsed
class FeedLink(atom.AtomBase):
  """The gd:feedLink element, which embeds or points to a nested feed."""
  _tag = 'feedLink'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed)
  _attributes.update({'rel': 'rel', 'readOnly': 'read_only',
                      'countHint': 'count_hint', 'href': 'href'})

  def __init__(self, count_hint=None, href=None, read_only=None, rel=None,
               feed=None, extension_elements=None, extension_attributes=None,
               text=None):
    """Create a feedLink, optionally wrapping an inline feed."""
    self.count_hint = count_hint
    self.href = href
    self.read_only = read_only
    self.rel = rel
    self.feed = feed
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def FeedLinkFromString(xml_string):
  """Deserialize an XML string into a FeedLink element."""
  parsed = atom.CreateClassFromXMLString(FeedLink, xml_string)
  return parsed

View File

@ -0,0 +1,15 @@
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

55
python/gdata/acl/data.py Normal file
View File

@ -0,0 +1,55 @@
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Access Control List (ACL) Extension"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import atom.data
import gdata.data
import gdata.opensearch.data
GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
class AclRole(atom.core.XmlElement):
  """Describes the role of an entry in an access control list."""
  _qname = GACL_TEMPLATE % 'role'
  # Maps the element's XML 'value' attribute to the .value member.
  value = 'value'
class AclScope(atom.core.XmlElement):
  """Describes the scope of an entry in an access control list."""
  _qname = GACL_TEMPLATE % 'scope'
  # Maps the element's XML 'type' and 'value' attributes.
  type = 'type'
  value = 'value'
class AclEntry(gdata.data.GDEntry):
  """Describes an entry in a feed of an access control list (ACL)."""
  # Child elements parsed into AclScope / AclRole instances.
  scope = AclScope
  role = AclRole
class AclFeed(gdata.data.GDFeed):
  """Describes a feed of an access control list (ACL)."""
  # Feed entries are parsed as AclEntry objects.
  entry = [AclEntry]

View File

@ -0,0 +1,20 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package's modules adapt the gdata library to run in other environments
The first example is the appengine module which contains functions and
classes which modify a GDataService object to run on Google App Engine.
"""

View File

@ -0,0 +1,101 @@
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functions to persist serialized auth tokens in the datastore.
The get_token and set_token functions should be used in conjunction with
gdata.gauth's token_from_blob and token_to_blob to allow auth token objects
to be reused across requests. It is up to your own code to ensure that the
token keys are unique.
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
from google.appengine.ext import db
from google.appengine.api import memcache
class Token(db.Model):
  """Datastore Model which stores a serialized auth token."""
  # Serialized token blob, as produced by gdata.gauth.token_to_blob
  # (see module docstring).
  t = db.BlobProperty()
def get_token(unique_key):
  """Fetch a stored serialized auth token by its unique key.

  Memcache is consulted first; on a miss the datastore is checked.

  Args:
    unique_key: str which uniquely identifies the desired auth token.

  Returns:
    The serialized token string (convert back to a usable object with
    gdata.gauth.token_from_blob), or None when nothing is stored under
    this key in either memcache or the datastore.
  """
  cached = memcache.get(unique_key)
  if cached is not None:
    return cached
  # Memcache miss: fall back to the datastore.
  stored = Token.get_by_key_name(unique_key)
  if stored is None:
    return None
  return stored.t
def set_token(unique_key, token_str):
  """Saves the serialized auth token in the datastore and memcache.

  Args:
    unique_key: str The unique name for this token. It is up to your
        code to ensure that this value is unique in your application;
        previous values are silently overwritten.
    token_str: str A serialized auth token, typically produced by
        gdata.gauth.token_to_blob.

  Returns:
    True if the token was stored successfully. False when the token
    could not be safely cached (a stale cached value could not be
    cleared). None when the token was cached in memcache but the
    datastore write did not yield a key -- although in that situation
    an exception has most likely been raised already.

  Raises:
    Datastore exceptions may be raised from the App Engine SDK in the
    event of failure.
  """
  if not memcache.set(unique_key, token_str):
    # memcache rejected the value; purge any stale cached copy so a
    # reader cannot observe an out-of-date token. delete() returning 0
    # signals a network failure, in which case we refuse to save.
    if memcache.delete(unique_key) == 0:
      return False
  # Persist to the datastore.
  if Token(key_name=unique_key, t=token_str).put():
    return True
  return None
def delete_token(unique_key):
  """Remove the token stored under unique_key from memcache and datastore."""
  memcache.delete(unique_key)
  Token(key_name=unique_key).delete()

View File

@ -0,0 +1,321 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides HTTP functions for gdata.service to use on Google App Engine
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
urlfetch API. Set the http_client member of a GDataService object to an
instance of an AppEngineHttpClient to allow the gdata library to run on
Google App Engine.
run_on_appengine: Function which will modify an existing GDataService object
to allow it to run on App Engine. It works by creating a new instance of
the AppEngineHttpClient and replacing the GDataService object's
http_client.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
import pickle
import atom.http_interface
import atom.token_store
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import memcache
def run_on_appengine(gdata_service, store_tokens=True,
                     single_user_mode=False, deadline=None):
  """Wires a GDataService object up to App Engine's urlfetch and datastore.

  Replaces the service's http_client with an AppEngineHttpClient and its
  token_store with an AppEngineTokenStore, then applies the token policy
  flags described below.

  Args:
    gdata_service: An AtomService, GDataService, or subclass instance
        which has http_client and token_store members.
    store_tokens: bool, default True. When True, each token passed to
        SetClientLoginToken or SetAuthSubToken is added to the token
        store; when False, tokens are not stored automatically.
    single_user_mode: bool, default False. When True, the current_token
        member of gdata_service is set by SetClientLoginToken or
        SetAuthSubToken, so everyone who accesses the object uses the
        same token. Note: if both store_tokens and single_user_mode are
        False, all tokens are ignored (neither stored in the datastore
        nor on the service object), making authorized requests
        impossible.
    deadline: int (optional) Seconds to wait for an HTTP response before
        timing out. When None, App Engine's default deadline applies
        (5 seconds; the maximum is currently 10).

  Returns:
    The modified gdata_service, for convenient chaining.
  """
  gdata_service.http_client = AppEngineHttpClient(deadline=deadline)
  gdata_service.token_store = AppEngineTokenStore()
  gdata_service.auto_store_tokens = store_tokens
  gdata_service.auto_set_current_token = single_user_mode
  return gdata_service
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
  """HTTP client which routes requests through App Engine's urlfetch API."""

  def __init__(self, headers=None, deadline=None):
    self.debug = False
    self.headers = headers or {}
    self.deadline = deadline

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server.

    Supports GET, POST, PUT, and DELETE. Example:

      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP verb, usually one of 'GET', 'POST', 'PUT'
          or 'DELETE'.
      url: string or atom.url.Url The full URL for the request.
      data: filestream, list of parts, or any object convertible to a
          string. Should be None for GET and DELETE. A file-like object
          is read in full; a list of parts is converted part by part and
          concatenated.
      headers: dict of strings. HTTP headers to send with the request.

    Returns:
      An HttpResponse wrapping the urlfetch result.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    # Flatten the payload into a single string (None stays None).
    data_str = data
    if data:
      if isinstance(data, list):
        # Convert each part to a string and concatenate.
        data_str = ''.join([_convert_data_part(part) for part in data])
      else:
        data_str = _convert_data_part(data)
    # Derive Content-Length from the payload when the caller omitted it.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = str(len(data_str))
    # Default the content type when none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'
    # Map the HTTP verb onto the corresponding urlfetch constant
    # (unknown verbs map to None, matching the original if/elif chain).
    method = {'GET': urlfetch.GET,
              'POST': urlfetch.POST,
              'PUT': urlfetch.PUT,
              'DELETE': urlfetch.DELETE}.get(operation)
    fetch_args = dict(url=str(url), payload=data_str, method=method,
                      headers=all_headers, follow_redirects=False)
    if self.deadline is not None:
      fetch_args['deadline'] = self.deadline
    return HttpResponse(urlfetch.Fetch(**fetch_args))
def _convert_data_part(data):
  """Coerce one payload part to a string.

  Strings and falsy values pass through unchanged; file-like objects are
  read in full; anything else is converted with str().
  """
  if not data or isinstance(data, str):
    return data
  if hasattr(data, 'read'):
    # File-like object: consume it completely.
    return data.read()
  # Not a file; fall back to the string representation.
  return str(data)
class HttpResponse(object):
  """Adapts a urlfetch response to look like an httplib response.

  Lets the result returned by AppEngineHttpClient.request be consumed by
  gdata.service methods that expect httplib-style response objects.
  """

  def __init__(self, urlfetch_response):
    self.body = StringIO.StringIO(urlfetch_response.content)
    self.headers = urlfetch_response.headers
    self.status = urlfetch_response.status_code
    self.reason = ''

  def read(self, length=None):
    # A falsy length (None, and also 0) reads the rest of the body.
    if length:
      return self.body.read(length)
    return self.body.read()

  def getheader(self, name):
    # Fall back to a lowercased lookup when the exact name is absent
    # (raises KeyError when neither spelling exists, as before).
    if not self.headers.has_key(name):
      return self.headers[name.lower()]
    return self.headers[name]
class TokenCollection(db.Model):
  """Datastore Model which associates auth tokens with the current user."""
  # The App Engine user these tokens belong to.
  user = db.UserProperty()
  # Pickled dict mapping scope string -> token object
  # (written by save_auth_tokens, read by load_auth_tokens).
  pickled_tokens = db.BlobProperty()
class AppEngineTokenStore(atom.token_store.TokenStore):
  """Stores the signed-in user's auth tokens in the App Engine datastore.

  Tokens are only persisted when there is a signed-in user (i.e. when
  users.get_current_user() returns a user object).
  """

  def __init__(self):
    self.user = None

  def add_token(self, token):
    """Stores the token under each of its scopes for the current user.

    If there is no current user, the token will not be stored.

    Returns:
      True when the token was written; False otherwise (the token has no
      scopes, or no key was returned because nobody is signed in).
    """
    tokens = load_auth_tokens(self.user)
    if not hasattr(token, 'scopes') or not token.scopes:
      return False
    for scope in token.scopes:
      tokens[str(scope)] = token
    # save_auth_tokens returns the datastore key, or None on failure.
    return bool(save_auth_tokens(tokens, self.user))

  def find_token(self, url):
    """Finds a stored token which is valid for a request to url.

    Returns:
      A stored token belonging to the current user and valid for the
      desired URL. When there is no current user or no valid stored
      token, an atom.http_interface.GenericToken is returned instead.
      Returns None only when url is None.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    tokens = load_auth_tokens(self.user)
    if url in tokens:
      candidate = tokens[url]
      if candidate.valid_for_scope(url):
        return candidate
      # The stored token no longer covers this scope; drop it.
      del tokens[url]
      save_auth_tokens(tokens, self.user)
    for scope, candidate in tokens.iteritems():
      if candidate.valid_for_scope(url):
        return candidate
    return atom.http_interface.GenericToken()

  def remove_token(self, token):
    """Removes every scope entry matching token from the user's store.

    Returns:
      True when at least one entry was removed; False when the token was
      not found (possibly because there is no current user).
    """
    tokens = load_auth_tokens(self.user)
    doomed_scopes = [scope for scope, stored in tokens.iteritems()
                     if stored == token]
    for scope in doomed_scopes:
      del tokens[scope]
    if doomed_scopes:
      save_auth_tokens(tokens, self.user)
    return bool(doomed_scopes)

  def remove_all_tokens(self):
    """Removes all of the current user's tokens from the datastore."""
    save_auth_tokens({}, self.user)
def save_auth_tokens(token_dict, user=None):
  """Writes token_dict to the datastore (and memcache) for the user.

  When no user is supplied and nobody is signed in, nothing is written.

  Returns:
    The key of the TokenCollection entity holding the user's tokens, or
    None when there is no current user.
  """
  if user is None:
    user = users.get_current_user()
  if user is None:
    return None
  pickled = pickle.dumps(token_dict)
  memcache.set('gdata_pickled_tokens:%s' % user, pickled)
  user_tokens = TokenCollection.all().filter('user =', user).get()
  if user_tokens:
    # Update the existing collection in place.
    user_tokens.pickled_tokens = pickled
  else:
    user_tokens = TokenCollection(user=user, pickled_tokens=pickled)
  return user_tokens.put()
def load_auth_tokens(user=None):
  """Reads the user's token dictionary from memcache or the datastore.

  Returns an empty dict when nobody is signed in or the user has no
  stored tokens.
  """
  if user is None:
    user = users.get_current_user()
  if user is None:
    return {}
  cache_key = 'gdata_pickled_tokens:%s' % user
  pickled_tokens = memcache.get(cache_key)
  if pickled_tokens:
    return pickle.loads(pickled_tokens)
  user_tokens = TokenCollection.all().filter('user =', user).get()
  if user_tokens:
    # Repopulate memcache so the next lookup skips the datastore.
    memcache.set(cache_key, user_tokens.pickled_tokens)
    return pickle.loads(user_tokens.pickled_tokens)
  return {}

View File

@ -0,0 +1,223 @@
#!/usr/bin/python
#
# Original Copyright (C) 2006 Google Inc.
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note that this module will not function without specifically adding
# 'analytics': [ #Google Analytics
# 'https://www.google.com/analytics/feeds/'],
# to CLIENT_LOGIN_SCOPES in the gdata/service.py file
"""Contains extensions to Atom objects used with Google Analytics."""
__author__ = 'api.suryasev (Sal Uryasev)'
import atom
import gdata
GAN_NAMESPACE = 'http://schemas.google.com/analytics/2009'
class TableId(gdata.GDataEntry):
  """tableId element."""
  # Declarative XML mapping: element name and namespace only. The
  # element's text payload carries the table id (see
  # AccountListFeedFromString, which mirrors .text into .value).
  _tag = 'tableId'
  _namespace = GAN_NAMESPACE
class Property(gdata.GDataEntry):
  """A gan:property element: a named name/value pair on an entry."""
  _tag = 'property'
  _namespace = GAN_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _attributes.update({'name': 'name', 'value': 'value'})

  def __init__(self, name=None, value=None, *args, **kwargs):
    self.name = name
    self.value = value
    super(Property, self).__init__(*args, **kwargs)

  def __str__(self):
    return self.value

  # repr mirrors str, as in the original implementation.
  __repr__ = __str__
class AccountListEntry(gdata.GDataEntry):
  """An Atom entry in the Google Analytics account list feed.

  (The original docstring referred to Google Documents; this class lives
  in the Analytics module and carries tableId/property children.)
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}tableId' % GAN_NAMESPACE] = ('tableId', [TableId])
  _children['{%s}property' % GAN_NAMESPACE] = ('property', [Property])

  def __init__(self, tableId=None, property=None, *args, **kwargs):
    self.tableId = tableId
    self.property = property
    super(AccountListEntry, self).__init__(*args, **kwargs)
def AccountListEntryFromString(xml_string):
  """Converts an XML string into an AccountListEntry object.

  Args:
    xml_string: string The XML describing one account list feed entry.

  Returns:
    An AccountListEntry corresponding to the given XML.
  """
  parsed = atom.CreateClassFromXMLString(AccountListEntry, xml_string)
  return parsed
class AccountListFeed(gdata.GDataFeed):
  """A feed of Google Analytics account list entries.

  (The original docstring referred to Google Documents Items; the feed's
  entries are parsed as AccountListEntry objects below.)
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Parse atom:entry children as AccountListEntry objects.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [AccountListEntry])
def AccountListFeedFromString(xml_string):
  """Converts an XML string into an AccountListFeed object.

  For convenience, each property is also attached directly to its entry
  under its 'ga:'-stripped name (e.g. entry.AccountName), and every
  tableId gains a .value member mirroring its text.

  Args:
    xml_string: string The XML describing an AccountList feed.

  Returns:
    An AccountListFeed object corresponding to the given XML.
  """
  feed = atom.CreateClassFromXMLString(AccountListFeed, xml_string)
  for entry in feed.entry:
    for prop in entry.property:
      entry.__dict__[prop.name.replace('ga:', '')] = prop
    for table_id in entry.tableId:
      table_id.__dict__['value'] = table_id.text
  return feed
class Dimension(gdata.GDataEntry):
  """A gan:dimension element attached to an Analytics data entry."""
  _tag = 'dimension'
  _namespace = GAN_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _attributes.update({'name': 'name', 'value': 'value', 'type': 'type',
                      'confidenceInterval': 'confidence_interval'})

  def __init__(self, name=None, value=None, type=None,
               confidence_interval=None, *args, **kwargs):
    self.name = name
    self.value = value
    self.type = type
    self.confidence_interval = confidence_interval
    super(Dimension, self).__init__(*args, **kwargs)

  def __str__(self):
    return self.value

  # repr mirrors str, as in the original implementation.
  __repr__ = __str__
class Metric(gdata.GDataEntry):
  """A gan:metric element attached to an Analytics data entry.

  Structurally parallel to Dimension; only the tag differs.
  """
  _tag = 'metric'
  _namespace = GAN_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _attributes.update({'name': 'name', 'value': 'value', 'type': 'type',
                      'confidenceInterval': 'confidence_interval'})

  def __init__(self, name=None, value=None, type=None,
               confidence_interval=None, *args, **kwargs):
    self.name = name
    self.value = value
    self.type = type
    self.confidence_interval = confidence_interval
    super(Metric, self).__init__(*args, **kwargs)

  def __str__(self):
    return self.value

  # repr mirrors str, as in the original implementation.
  __repr__ = __str__
class AnalyticsDataEntry(gdata.GDataEntry):
  """The Google Analytics flavor of an Atom entry.

  Holds the dimension and metric children of one data row.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}dimension' % GAN_NAMESPACE] = ('dimension', [Dimension])
  _children['{%s}metric' % GAN_NAMESPACE] = ('metric', [Metric])

  def __init__(self, dimension=None, metric=None, *args, **kwargs):
    self.dimension = dimension
    self.metric = metric
    super(AnalyticsDataEntry, self).__init__(*args, **kwargs)
class AnalyticsDataFeed(gdata.GDataFeed):
  """A feed containing Google Analytics data entries."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Parse atom:entry children as AnalyticsDataEntry objects.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [AnalyticsDataEntry])
  # NOTE(review): the string below is a stray no-op literal in the class
  # body (not a docstring); kept verbatim.
  """
  Data Feed
  """
def AnalyticsDataFeedFromString(xml_string):
  """Converts an XML string into an AnalyticsDataFeed object.

  Each metric and dimension is also referenced directly from its entry
  for easier access (e.g. entry.keyword.value), with the 'ga:' prefix
  stripped from the attribute name.

  Args:
    xml_string: string The XML describing an Analytics data feed.

  Returns:
    An AnalyticsDataFeed object corresponding to the given XML.
  """
  feed = atom.CreateClassFromXMLString(AnalyticsDataFeed, xml_string)
  if feed.entry:
    for entry in feed.entry:
      # Fix: the original guarded entry.dimension against None but
      # iterated entry.metric unconditionally, raising TypeError when no
      # metrics were parsed. Guard both symmetrically.
      if entry.metric is not None:
        for met in entry.metric:
          entry.__dict__[met.name.replace('ga:', '')] = met
      if entry.dimension is not None:
        for dim in entry.dimension:
          entry.__dict__[dim.name.replace('ga:', '')] = dim
  return feed

View File

@ -0,0 +1,313 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Streamlines requests to the Google Analytics APIs."""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import atom.data
import gdata.client
import gdata.analytics.data
import gdata.gauth
class AnalyticsClient(gdata.client.GDClient):
  """Client extension for the Google Analytics API service."""

  api_version = '2'
  auth_service = 'analytics'
  auth_scopes = gdata.gauth.AUTH_SCOPES['analytics']
  account_type = 'GOOGLE'

  def __init__(self, auth_token=None, **kwargs):
    """Initializes a client for the Google Analytics Data Export API.

    Args:
      auth_token: gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken (optional) Authorizes this client to edit the
          user's data.
      kwargs: Remaining parameters forwarded to the
          gdata.client.GDClient constructor.
    """
    gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)

  def get_account_feed(self, feed_uri, auth_token=None, **kwargs):
    """Requests the Analytics API Account Feed.

    Args:
      feed_uri: str or gdata.analytics.AccountFeedQuery defining which
          account data to retrieve.
    """
    return self.get_feed(feed_uri,
                         desired_class=gdata.analytics.data.AccountFeed,
                         auth_token=auth_token, **kwargs)

  GetAccountFeed = get_account_feed

  def get_data_feed(self, feed_uri, auth_token=None, **kwargs):
    """Requests the Analytics API Data Feed.

    Args:
      feed_uri: str or gdata.analytics.AccountFeedQuery defining which
          report data to retrieve.
    """
    return self.get_feed(feed_uri,
                         desired_class=gdata.analytics.data.DataFeed,
                         auth_token=auth_token, **kwargs)

  GetDataFeed = get_data_feed

  def get_management_feed(self, feed_uri, auth_token=None, **kwargs):
    """Requests one of the Google Analytics Management API feeds.

    The read-only Management API supersedes the Data Export API Account
    Feed and supports five feeds: account, web property, profile, goal,
    and advanced segment. Each feed has a matching query class; all
    requests return the same data object.

    Args:
      feed_uri: str or one of AccountQuery, WebPropertyQuery,
          ProfileQuery, GoalQuery, MgmtAdvSegFeedQuery defining which
          feed to retrieve.
    """
    return self.get_feed(feed_uri,
                         desired_class=gdata.analytics.data.ManagementFeed,
                         auth_token=auth_token, **kwargs)

  GetMgmtFeed = GetManagementFeed = get_management_feed
class AnalyticsBaseQuery(gdata.client.GDQuery):
  """Abstracts common configuration across all query objects.

  Attributes:
    scheme: string The default scheme. Should always be https.
    host: string The default host.
  """
  # All Analytics feeds are served over HTTPS from www.google.com.
  scheme = 'https'
  host = 'www.google.com'
class AccountFeedQuery(AnalyticsBaseQuery):
  """Account Feed query class to simplify constructing Account Feed Urls.

  Either pass a dict of account feed query parameters to the
  constructor:

    queryUrl = AccountFeedQuery({'max-results': '10000'})

  or add parameters directly to the query object:

    queryUrl = AccountFeedQuery()
    queryUrl.query['max-results'] = '10000'

  Args:
    query: dict (optional) Contains all the GA Data Feed query
        parameters as keys.
  """

  path = '/analytics/feeds/accounts/default'

  def __init__(self, query=None, **kwargs):
    # Fix: the previous query={} default was a single dict shared by
    # every instance constructed without arguments, so mutations leaked
    # between query objects. A caller-supplied dict is still stored by
    # reference, as before.
    self.query = query if query is not None else {}
    # NOTE(review): preserved from the original -- this calls the GDQuery
    # constructor with self as a positional argument rather than
    # GDQuery.__init__(self, **kwargs); verify upstream intent.
    gdata.client.GDQuery(self, **kwargs)
class DataFeedQuery(AnalyticsBaseQuery):
  """Data Feed query class to simplify constructing Data Feed Urls.

  Either pass a dict of data feed query parameters to the constructor:

    queryUrl = DataFeedQuery({'start-date': '2008-10-01'})

  or add parameters directly to the query object:

    queryUrl = DataFeedQuery()
    queryUrl.query['start-date'] = '2008-10-01'

  Args:
    query: dict (optional) Contains all the GA Data Feed query
        parameters as keys.
  """

  path = '/analytics/feeds/data'

  def __init__(self, query=None, **kwargs):
    # Fix: the previous query={} default was a single dict shared by
    # every instance constructed without arguments.
    self.query = query if query is not None else {}
    # NOTE(review): preserved from the original -- calls the GDQuery
    # constructor with self as a positional argument; verify upstream.
    gdata.client.GDQuery(self, **kwargs)
class AccountQuery(AnalyticsBaseQuery):
  """Management API Account Feed query class.

  Example Usage:

    queryUrl = AccountQuery()
    queryUrl = AccountQuery({'max-results': 100})

    queryUrl2 = AccountQuery()
    queryUrl2.query['max-results'] = 100

  Args:
    query: dict (optional) A dictionary of query parameters.
  """

  path = '/analytics/feeds/datasources/ga/accounts'

  def __init__(self, query=None, **kwargs):
    # Fix: the previous query={} default was a single dict shared by
    # every instance constructed without arguments.
    self.query = query if query is not None else {}
    # NOTE(review): preserved from the original -- calls the GDQuery
    # constructor with self as a positional argument; verify upstream.
    gdata.client.GDQuery(self, **kwargs)
class WebPropertyQuery(AnalyticsBaseQuery):
  """Management API Web Property Feed query class.

  Example Usage:

    queryUrl = WebPropertyQuery()
    queryUrl = WebPropertyQuery('123', {'max-results': 100})
    queryUrl = WebPropertyQuery(acct_id='123',
                                query={'max-results': 100})

    queryUrl2 = WebPropertyQuery()
    queryUrl2.acct_id = '1234'
    queryUrl2.query['max-results'] = 100

  Args:
    acct_id: string (optional) The account ID to filter results.
        Default is ~all.
    query: dict (optional) A dictionary of query parameters.
  """

  def __init__(self, acct_id='~all', query=None, **kwargs):
    self.acct_id = acct_id
    # Fix: the previous query={} default was a single dict shared by
    # every instance constructed without arguments.
    self.query = query if query is not None else {}
    # NOTE(review): preserved from the original -- calls the GDQuery
    # constructor with self as a positional argument; verify upstream.
    gdata.client.GDQuery(self, **kwargs)

  @property
  def path(self):
    """Feed path parameterized by the account id."""
    return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties' %
            self.acct_id)
class ProfileQuery(AnalyticsBaseQuery):
    """Management API Profile Feed query class.

    Example Usage:
        queryUrl = ProfileQuery()
        queryUrl = ProfileQuery('123', 'UA-123-1', {'max-results': 100})
        queryUrl = ProfileQuery(acct_id='123',
                                web_prop_id='UA-123-1',
                                query={'max-results': 100})

        queryUrl2 = ProfileQuery()
        queryUrl2.acct_id = '123'
        queryUrl2.web_prop_id = 'UA-123-1'
        queryUrl2.query['max-results'] = 100

    Args:
        acct_id: string (optional) The account ID to filter results.
            Default is ~all.
        web_prop_id: string (optional) The web property ID to filter results.
            Default is ~all.
        query: dict (optional) A dictionary of query parameters.
    """

    def __init__(self, acct_id='~all', web_prop_id='~all', query=None,
                 **kwargs):
        self.acct_id = acct_id
        self.web_prop_id = web_prop_id
        # None sentinel avoids the shared mutable-default-dict pitfall:
        # mutating queryUrl.query must never affect other instances.
        self.query = query if query is not None else {}
        gdata.client.GDQuery(self, **kwargs)

    @property
    def path(self):
        """Wrapper for path attribute."""
        return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties'
                '/%s/profiles' % (self.acct_id, self.web_prop_id))
class GoalQuery(AnalyticsBaseQuery):
    """Management API Goal Feed query class.

    Example Usage:
        queryUrl = GoalQuery()
        queryUrl = GoalQuery('123', 'UA-123-1', '555',
                             {'max-results': 100})
        queryUrl = GoalQuery(acct_id='123',
                             web_prop_id='UA-123-1',
                             profile_id='555',
                             query={'max-results': 100})

        queryUrl2 = GoalQuery()
        queryUrl2.acct_id = '123'
        queryUrl2.web_prop_id = 'UA-123-1'
        queryUrl2.query['max-results'] = 100

    Args:
        acct_id: string (optional) The account ID to filter results.
            Default is ~all.
        web_prop_id: string (optional) The web property ID to filter results.
            Default is ~all.
        profile_id: string (optional) The profile ID to filter results.
            Default is ~all.
        query: dict (optional) A dictionary of query parameters.
    """

    def __init__(self, acct_id='~all', web_prop_id='~all', profile_id='~all',
                 query=None, **kwargs):
        self.acct_id = acct_id
        self.web_prop_id = web_prop_id
        self.profile_id = profile_id
        # None sentinel (consistent with the other query classes) instead of
        # `query or {}`: also preserves an explicitly-passed empty dict.
        self.query = query if query is not None else {}
        gdata.client.GDQuery(self, **kwargs)

    @property
    def path(self):
        """Wrapper for path attribute."""
        return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties'
                '/%s/profiles/%s/goals' % (self.acct_id, self.web_prop_id,
                                           self.profile_id))
class AdvSegQuery(AnalyticsBaseQuery):
    """Management API Advanced Segment Feed query class.

    Example Usage:
        queryUrl = AdvSegQuery()
        queryUrl = AdvSegQuery({'max-results': 100})

        queryUrl1 = AdvSegQuery()
        queryUrl1.query['max-results'] = 100

    Args:
        query: dict (optional) A dictionary of query parameters.
    """

    path = '/analytics/feeds/datasources/ga/segments'

    def __init__(self, query=None, **kwargs):
        # None sentinel avoids the shared mutable-default-dict pitfall:
        # mutating queryUrl.query must never affect other instances.
        self.query = query if query is not None else {}
        gdata.client.GDQuery(self, **kwargs)

View File

@ -0,0 +1,365 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for both the
Google Analytics Data Export and Management APIs. Although both APIs
operate on different parts of Google Analytics, they share common XML
elements and are released in the same module.
The Management API supports 5 feeds all using the same ManagementFeed
data class.
"""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import gdata.data
import atom.core
import atom.data
# XML Namespace used in Google Analytics API entities.
DXP_NS = '{http://schemas.google.com/analytics/2009}%s'
GA_NS = '{http://schemas.google.com/ga/2009}%s'
GD_NS = '{http://schemas.google.com/g/2005}%s'
class GetProperty(object):
    """Mixin that adds name-based lookup over a ``property`` element list."""

    def get_property(self, name):
        """Return the <dxp:property> element whose ``name`` matches.

        Args:
            name: string The name attribute of the property to look up.

        Returns:
            The first matching property object, or None when no element in
            ``self.property`` carries that name.
        """
        matches = (prop for prop in self.property if prop.name == name)
        return next(matches, None)

    # CamelCase alias kept for API compatibility with the Java-style names
    # used throughout the gdata client library.
    GetProperty = get_property
class GetMetric(object):
    """Mixin that adds name-based lookup over a ``metric`` element list."""

    def get_metric(self, name):
        """Return the <dxp:metric> element whose ``name`` matches.

        Args:
            name: string The name attribute of the metric to look up.

        Returns:
            The first matching metric object, or None when no element in
            ``self.metric`` carries that name.
        """
        matches = (met for met in self.metric if met.name == name)
        return next(matches, None)

    # CamelCase alias kept for API compatibility.
    GetMetric = get_metric
class GetDimension(object):
    """Mixin that adds name-based lookup over a ``dimension`` element list."""

    def get_dimension(self, name):
        """Return the <dxp:dimension> element whose ``name`` matches.

        Args:
            name: string The name attribute of the dimension to look up.

        Returns:
            The first matching dimension object, or None when no element in
            ``self.dimension`` carries that name.
        """
        matches = (dim for dim in self.dimension if dim.name == name)
        return next(matches, None)

    # CamelCase alias kept for API compatibility.
    GetDimension = get_dimension
class GaLinkFinder(object):
    """Mixin with helpers for navigating links in Google Analytics feeds."""

    def get_parent_links(self):
        """Return every link whose rel equals the parent target kind."""
        return [link for link in self.link if link.rel == link.parent()]

    GetParentLinks = get_parent_links

    def get_child_links(self):
        """Return every link whose rel equals the child target kind."""
        return [link for link in self.link if link.rel == link.child()]

    GetChildLinks = get_child_links

    def get_child_link(self, target_kind):
        """Return one child link matching ``target_kind``.

        Args:
            target_kind: string The targetKind attribute value to match.

        Returns:
            The first child link with the given target_kind, or None when no
            such link exists.
        """
        candidates = (link for link in self.get_child_links()
                      if link.target_kind == target_kind)
        return next(candidates, None)

    GetChildLink = get_child_link
class StartDate(atom.core.XmlElement):
    """Analytics Feed <dxp:startDate>"""
    _qname = DXP_NS % 'startDate'


class EndDate(atom.core.XmlElement):
    """Analytics Feed <dxp:endDate>"""
    _qname = DXP_NS % 'endDate'


class Metric(atom.core.XmlElement):
    """Analytics Feed <dxp:metric>"""
    _qname = DXP_NS % 'metric'
    # XML attribute names mapped onto Python attributes by atom.core.
    name = 'name'
    type = 'type'
    value = 'value'
    confidence_interval = 'confidenceInterval'


class Aggregates(atom.core.XmlElement, GetMetric):
    """Analytics Data Feed <dxp:aggregates> (metrics looked up via GetMetric)."""
    _qname = DXP_NS % 'aggregates'
    # Repeated child element: parsed into a list of Metric instances.
    metric = [Metric]


class TableId(atom.core.XmlElement):
    """Analytics Feed <dxp:tableId>"""
    _qname = DXP_NS % 'tableId'


class TableName(atom.core.XmlElement):
    """Analytics Feed <dxp:tableName>"""
    _qname = DXP_NS % 'tableName'


class Property(atom.core.XmlElement):
    """Analytics Feed <dxp:property>"""
    _qname = DXP_NS % 'property'
    name = 'name'
    value = 'value'


class Definition(atom.core.XmlElement):
    """Analytics Feed <dxp:definition>"""
    _qname = DXP_NS % 'definition'


class Segment(atom.core.XmlElement):
    """Analytics Feed <dxp:segment>"""
    _qname = DXP_NS % 'segment'
    id = 'id'
    name = 'name'
    # Single nested <dxp:definition> child element.
    definition = Definition
class Engagement(atom.core.XmlElement):
    """Analytics Feed <ga:engagement> (note: ga namespace, not dxp)."""
    _qname = GA_NS % 'engagement'
    type = 'type'
    comparison = 'comparison'
    threshold_value = 'thresholdValue'


class Step(atom.core.XmlElement):
    """Analytics Feed <ga:step>"""
    _qname = GA_NS % 'step'
    number = 'number'
    name = 'name'
    path = 'path'


class Destination(atom.core.XmlElement):
    """Analytics Feed <ga:destination>"""
    _qname = GA_NS % 'destination'
    # Repeated <ga:step> child elements.
    step = [Step]
    expression = 'expression'
    case_sensitive = 'caseSensitive'
    match_type = 'matchType'
    step1_required = 'step1Required'


class Goal(atom.core.XmlElement):
    """Analytics Feed <ga:goal>"""
    _qname = GA_NS % 'goal'
    destination = Destination
    engagement = Engagement
    number = 'number'
    name = 'name'
    value = 'value'
    active = 'active'


class CustomVariable(atom.core.XmlElement):
    """Analytics Data Feed <ga:customVariable>"""
    _qname = GA_NS % 'customVariable'
    index = 'index'
    name = 'name'
    scope = 'scope'


class DataSource(atom.core.XmlElement, GetProperty):
    """Analytics Data Feed <dxp:dataSource> (properties via GetProperty)."""
    _qname = DXP_NS % 'dataSource'
    table_id = TableId
    table_name = TableName
    property = [Property]


class Dimension(atom.core.XmlElement):
    """Analytics Feed <dxp:dimension>"""
    _qname = DXP_NS % 'dimension'
    name = 'name'
    value = 'value'
class AnalyticsLink(atom.data.Link):
    """Atom <link> subclass carrying the gd:targetKind attribute."""
    target_kind = GD_NS % 'targetKind'

    @classmethod
    def parent(cls):
        """Parent target_kind"""
        # GA_NS[1:-3] strips the leading '{' and trailing '}%s' from the
        # namespace template, leaving just the bare namespace URI.
        return '%s#parent' % GA_NS[1:-3]

    @classmethod
    def child(cls):
        """Child target_kind"""
        return '%s#child' % GA_NS[1:-3]
# Account Feed.


class AccountEntry(gdata.data.GDEntry, GetProperty):
    """Analytics Account Feed <entry> (properties looked up via GetProperty)."""
    _qname = atom.data.ATOM_TEMPLATE % 'entry'
    table_id = TableId
    property = [Property]
    goal = [Goal]
    custom_variable = [CustomVariable]


class AccountFeed(gdata.data.GDFeed):
    """Analytics Account Feed <feed>"""
    _qname = atom.data.ATOM_TEMPLATE % 'feed'
    segment = [Segment]
    entry = [AccountEntry]
# Data Feed.


class DataEntry(gdata.data.GDEntry, GetMetric, GetDimension):
    """Analytics Data Feed <entry>"""
    _qname = atom.data.ATOM_TEMPLATE % 'entry'
    dimension = [Dimension]
    metric = [Metric]

    def get_object(self, name):
        """Returns either a Dimension or Metric object with the same name as the
        name parameter.

        Dimensions are searched first; metrics are only consulted when no
        dimension matched.

        Args:
            name: string The name of the object to retrieve.

        Returns:
            The matching Dimension or Metric, or None when neither list
            contains an element with that name.
        """
        output = self.GetDimension(name)
        if not output:
            output = self.GetMetric(name)
        return output

    GetObject = get_object


class DataFeed(gdata.data.GDFeed):
    """Analytics Data Feed <feed>.

    Although there is only one datasource, it is stored in an array to
    replicate the design of the Java client library and ensure backwards
    compatibility if new data sources are added in the future.
    """
    _qname = atom.data.ATOM_TEMPLATE % 'feed'
    start_date = StartDate
    end_date = EndDate
    aggregates = Aggregates
    data_source = [DataSource]
    entry = [DataEntry]
    segment = Segment
# Management Feed.


class ManagementEntry(gdata.data.GDEntry, GetProperty, GaLinkFinder):
    """Analytics Management Entry <entry>."""
    _qname = atom.data.ATOM_TEMPLATE % 'entry'
    kind = GD_NS % 'kind'
    property = [Property]
    goal = Goal
    segment = Segment
    link = [AnalyticsLink]


class ManagementFeed(gdata.data.GDFeed):
    """Analytics Management Feed <feed>.

    This class holds the data for all 5 Management API feeds: Account,
    Web Property, Profile, Goal, and Advanced Segment Feeds.
    """
    _qname = atom.data.ATOM_TEMPLATE % 'feed'
    entry = [ManagementEntry]
    kind = GD_NS % 'kind'

View File

@ -0,0 +1,331 @@
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AccountsService extends the GDataService to streamline Google Analytics
account information operations.
AnalyticsDataService: Provides methods to query google analytics data feeds.
Extends GDataService.
DataQuery: Queries a Google Analytics Data list feed.
AccountQuery: Queries a Google Analytics Account list feed.
"""
__author__ = 'api.suryasev (Sal Uryasev)'
import urllib
import atom
import gdata.service
import gdata.analytics
class AccountsService(gdata.service.GDataService):
    """Client extension for the Google Analytics Account List feed."""

    def __init__(self, email="", password=None, source=None,
                 server='www.google.com/analytics', additional_headers=None,
                 **kwargs):
        """Creates a client for the Google Analytics service.

        Args:
            email: string (optional) The user's email address, used for
                authentication.
            password: string (optional) The user's password.
            source: string (optional) The name of the user's application.
            server: string (optional) The name of the server to which a
                connection will be opened.
            additional_headers: dict (optional) Extra HTTP headers forwarded
                to the base GDataService.
            **kwargs: The other parameters to pass to gdata.service.GDataService
                constructor.
        """
        gdata.service.GDataService.__init__(
            self, email=email, password=password, service='analytics',
            source=source, server=server, additional_headers=additional_headers,
            **kwargs)

    def QueryAccountListFeed(self, uri):
        """Retrieves an AccountListFeed by retrieving a URI based off the Document
        List feed, including any query parameters. An AccountListFeed object
        can be used to construct these parameters.

        Args:
            uri: string The URI of the feed being retrieved possibly with query
                parameters.

        Returns:
            An AccountListFeed object representing the feed returned by the
            server.
        """
        return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)

    def GetAccountListEntry(self, uri):
        """Retrieves a particular AccountListEntry by its unique URI.

        Args:
            uri: string The unique URI of an entry in an Account List feed.

        Returns:
            An AccountListEntry object representing the retrieved entry.
        """
        return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)

    def GetAccountList(self, max_results=1000, text_query=None,
                       params=None, categories=None):
        """Retrieves a feed containing all of a user's accounts and profiles."""
        # Delegates URI construction to AccountQuery, then fetches the feed.
        q = gdata.analytics.service.AccountQuery(max_results=max_results,
                                                 text_query=text_query,
                                                 params=params,
                                                 categories=categories);
        return self.QueryAccountListFeed(q.ToUri())
class AnalyticsDataService(gdata.service.GDataService):
    """Client extension for the Google Analytics service Data List feed."""

    def __init__(self, email=None, password=None, source=None,
                 server='www.google.com/analytics', additional_headers=None,
                 **kwargs):
        """Creates a client for the Google Analytics service.

        Args:
            email: string (optional) The user's email address, used for
                authentication.
            password: string (optional) The user's password.
            source: string (optional) The name of the user's application.
            server: string (optional) The name of the server to which a
                connection will be opened. Default value:
                'www.google.com/analytics'.
            additional_headers: dict (optional) Extra HTTP headers forwarded
                to the base GDataService.
            **kwargs: The other parameters to pass to gdata.service.GDataService
                constructor.
        """
        gdata.service.GDataService.__init__(self,
            email=email, password=password, service='analytics', source=source,
            server=server, additional_headers=additional_headers, **kwargs)

    def GetData(self, ids='', dimensions='', metrics='',
                sort='', filters='', start_date='',
                end_date='', start_index='',
                max_results=''):
        """Retrieves a feed containing a user's data.

        Args:
            ids: comma-separated string of analytics accounts.
            dimensions: comma-separated string of dimensions.
            metrics: comma-separated string of metrics.
            sort: comma-separated string of dimensions and metrics for sorting.
                This may be prefixed with a minus to sort in reverse order
                (e.g. '-ga:keyword'). If omitted, the first dimension passed
                in will be used.
            filters: comma-separated string of filter parameters
                (e.g. 'ga:keyword==google').
            start_date: start date for data pull.
            end_date: end date for data pull.
            start_index: used in combination with max_results to pull more
                than 1000 entries. This defaults to 1.
            max_results: maximum results that the pull will return. This
                defaults to, and maxes out at 1000.
        """
        # Build the request URI from the supplied parameters and fetch it.
        q = gdata.analytics.service.DataQuery(ids=ids,
                                              dimensions=dimensions,
                                              metrics=metrics,
                                              filters=filters,
                                              sort=sort,
                                              start_date=start_date,
                                              end_date=end_date,
                                              start_index=start_index,
                                              max_results=max_results);
        return self.AnalyticsDataFeed(q.ToUri())

    def AnalyticsDataFeed(self, uri):
        """Retrieves an AnalyticsListFeed by retrieving a URI based off the
        Document List feed, including any query parameters. An
        AnalyticsListFeed object can be used to construct these parameters.

        Args:
            uri: string The URI of the feed being retrieved possibly with query
                parameters.

        Returns:
            An AnalyticsListFeed object representing the feed returned by the
            server.
        """
        return self.Get(uri,
                        converter=gdata.analytics.AnalyticsDataFeedFromString)

    """
    Account Fetching
    """

    def QueryAccountListFeed(self, uri):
        """Retrieves an Account ListFeed by retrieving a URI based off the Account
        List feed, including any query parameters. A AccountQuery object can
        be used to construct these parameters.

        Args:
            uri: string The URI of the feed being retrieved possibly with query
                parameters.

        Returns:
            An AccountListFeed object representing the feed returned by the
            server.
        """
        return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)

    def GetAccountListEntry(self, uri):
        """Retrieves a particular AccountListEntry by its unique URI.

        Args:
            uri: string The unique URI of an entry in an Account List feed.

        Returns:
            An AccountListEntry object representing the retrieved entry.
        """
        return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)

    def GetAccountList(self, username="default", max_results=1000,
                       start_index=1):
        """Retrieves a feed containing all of a user's accounts and profiles.

        The username parameter is soon to be deprecated, with 'default'
        becoming the only allowed parameter.
        """
        if not username:
            raise Exception("username is a required parameter")
        q = gdata.analytics.service.AccountQuery(username=username,
                                                 max_results=max_results,
                                                 start_index=start_index);
        return self.QueryAccountListFeed(q.ToUri())
class DataQuery(gdata.service.Query):
    """Object used to construct a URI to a data feed"""

    def __init__(self, feed='/feeds/data', text_query=None,
                 params=None, categories=None, ids="",
                 dimensions="", metrics="", sort="", filters="",
                 start_date="", end_date="", start_index="",
                 max_results=""):
        """Constructor for Analytics List Query

        Args:
            feed: string (optional) The path for the feed. (e.g. '/feeds/data')
            text_query: string (optional) The contents of the q query parameter.
                This string is URL escaped upon conversion to a URI.
            params: dict (optional) Parameter value string pairs which become
                URL params when translated to a URI. These parameters are
                added to the query's items.
            categories: list (optional) List of category strings which should
                be included as query categories. See gdata.service.Query for
                additional documentation.
            ids: comma-separated string of analytics accounts.
            dimensions: comma-separated string of dimensions.
            metrics: comma-separated string of metrics.
            sort: comma-separated string of dimensions and metrics.
                This may be prefixed with a minus to sort in reverse order
                (e.g. '-ga:keyword'). If omitted, the first dimension passed
                in will be used.
            filters: comma-separated string of filter parameters
                (e.g. 'ga:keyword==google').
            start_date: start date for data pull.
            end_date: end date for data pull.
            start_index: used in combination with max_results to pull more
                than 1000 entries. This defaults to 1.
            max_results: maximum results that the pull will return. This
                defaults to, and maxes out at 1000.
        """
        # Analytics-specific parameters, kept separate from the generic
        # query items managed by the gdata.service.Query base class.
        self.elements = {'ids': ids,
                         'dimensions': dimensions,
                         'metrics': metrics,
                         'sort': sort,
                         'filters': filters,
                         'start-date': start_date,
                         'end-date': end_date,
                         'start-index': start_index,
                         'max-results': max_results}
        gdata.service.Query.__init__(self, feed, text_query, params, categories)

    def ToUri(self):
        """Generates a URI from the query parameters set in the object.

        Returns:
            A string containing the URI used to retrieve entries from the
            Analytics List feed.
        """
        old_feed = self.feed
        # Append only the non-empty analytics parameters as a query string,
        # then let the base-class ToUri fold in text_query/params/categories.
        # NOTE(review): '/'.join([old_feed]) is a no-op -- it equals old_feed.
        self.feed = '/'.join([old_feed]) + '?' + \
            urllib.urlencode(dict([(key, value) for key, value in \
                self.elements.iteritems() if value]))
        new_feed = gdata.service.Query.ToUri(self)
        # Restore feed so ToUri can be called again without the query string
        # accumulating.
        self.feed = old_feed
        return new_feed
class AccountQuery(gdata.service.Query):
    """Object used to construct a URI to query the Google Account List feed"""

    def __init__(self, feed='/feeds/accounts', start_index=1,
                 max_results=1000, username='default', text_query=None,
                 params=None, categories=None):
        """Constructor for Account List Query

        Args:
            feed: string (optional) The path for the feed.
                (e.g. '/feeds/accounts')
            start_index: int (optional) 1-based index of the first result.
            max_results: int (optional) Maximum number of entries to return.
            text_query: string (optional) The contents of the q query parameter.
                This string is URL escaped upon conversion to a URI.
            params: dict (optional) Parameter value string pairs which become
                URL params when translated to a URI. These parameters are
                added to the query's items.
            categories: list (optional) List of category strings which should
                be included as query categories. See gdata.service.Query for
                additional documentation.
            username: string (deprecated) This value should now always be
                passed as 'default'.
        """
        self.max_results = max_results
        self.start_index = start_index
        self.username = username
        gdata.service.Query.__init__(self, feed, text_query, params, categories)

    def ToUri(self):
        """Generates a URI from the query parameters set in the object.

        The URI is built directly from feed, username, max-results and
        start-index; unlike DataQuery.ToUri, the base-class ToUri is never
        invoked here, so text_query, params and categories do not appear in
        the resulting URI.

        Returns:
            A string containing the URI used to retrieve entries from the
            Account List feed.
        """
        old_feed = self.feed
        self.feed = '/'.join([old_feed, self.username]) + '?' + \
            '&'.join(['max-results=' + str(self.max_results),
                      'start-index=' + str(self.start_index)])
        new_feed = self.feed
        # Restore feed so repeated ToUri calls do not compound the suffix.
        self.feed = old_feed
        return new_feed

View File

@ -0,0 +1,526 @@
#!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains objects used with Google Apps."""
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
import atom
import gdata
# XML namespaces which are often used in Google Apps entity.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
class EmailList(atom.AtomBase):
    """The Google Apps <apps:emailList> element (name attribute only)."""

    _tag = 'emailList'
    _namespace = APPS_NAMESPACE
    # Copy the parent maps so additions here don't mutate AtomBase's.
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['name'] = 'name'

    def __init__(self, name=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.name = name
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def EmailListFromString(xml_string):
    """Parses an EmailList instance from its XML string representation."""
    return atom.CreateClassFromXMLString(EmailList, xml_string)


class Who(atom.AtomBase):
    """The Google Data <gd:who> element (rel and email attributes)."""

    _tag = 'who'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['rel'] = 'rel'
    _attributes['email'] = 'email'

    def __init__(self, rel=None, email=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.rel = rel
        self.email = email
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def WhoFromString(xml_string):
    """Parses a Who instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Who, xml_string)
class Login(atom.AtomBase):
    """The Google Apps <apps:login> element.

    Carries the account credentials and status flags (suspended, admin,
    changePasswordAtNextLogin, agreedToTerms, ipWhitelisted,
    hashFunctionName).
    """

    _tag = 'login'
    _namespace = APPS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    # XML attribute name -> Python attribute name.
    _attributes['userName'] = 'user_name'
    _attributes['password'] = 'password'
    _attributes['suspended'] = 'suspended'
    _attributes['admin'] = 'admin'
    _attributes['changePasswordAtNextLogin'] = 'change_password'
    _attributes['agreedToTerms'] = 'agreed_to_terms'
    _attributes['ipWhitelisted'] = 'ip_whitelisted'
    _attributes['hashFunctionName'] = 'hash_function_name'

    def __init__(self, user_name=None, password=None, suspended=None,
                 ip_whitelisted=None, hash_function_name=None,
                 admin=None, change_password=None, agreed_to_terms=None,
                 extension_elements=None, extension_attributes=None,
                 text=None):
        self.user_name = user_name
        self.password = password
        self.suspended = suspended
        self.admin = admin
        self.change_password = change_password
        self.agreed_to_terms = agreed_to_terms
        self.ip_whitelisted = ip_whitelisted
        self.hash_function_name = hash_function_name
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def LoginFromString(xml_string):
    """Parses a Login instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Login, xml_string)


class Quota(atom.AtomBase):
    """The Google Apps <apps:quota> element (limit attribute only)."""

    _tag = 'quota'
    _namespace = APPS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['limit'] = 'limit'

    def __init__(self, limit=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.limit = limit
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def QuotaFromString(xml_string):
    """Parses a Quota instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Quota, xml_string)
class Name(atom.AtomBase):
    """The Google Apps <apps:name> element (familyName / givenName)."""

    _tag = 'name'
    _namespace = APPS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['familyName'] = 'family_name'
    _attributes['givenName'] = 'given_name'

    def __init__(self, family_name=None, given_name=None,
                 extension_elements=None, extension_attributes=None, text=None):
        self.family_name = family_name
        self.given_name = given_name
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def NameFromString(xml_string):
    """Parses a Name instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Name, xml_string)


class Nickname(atom.AtomBase):
    """The Google Apps <apps:nickname> element (name attribute only)."""

    _tag = 'nickname'
    _namespace = APPS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['name'] = 'name'

    def __init__(self, name=None,
                 extension_elements=None, extension_attributes=None, text=None):
        self.name = name
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def NicknameFromString(xml_string):
    """Parses a Nickname instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Nickname, xml_string)
class NicknameEntry(gdata.GDataEntry):
    """A Google Apps flavor of an Atom Entry for Nickname.

    Adds the <apps:login> and <apps:nickname> children on top of the
    standard GData entry fields.
    """

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
    _children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname)

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 login=None, nickname=None,
                 extended_property=None,
                 extension_elements=None, extension_attributes=None, text=None):
        gdata.GDataEntry.__init__(self, author=author, category=category,
                                  content=content,
                                  atom_id=atom_id, link=link, published=published,
                                  title=title, updated=updated)
        self.login = login
        self.nickname = nickname
        self.extended_property = extended_property or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def NicknameEntryFromString(xml_string):
    """Parses a NicknameEntry instance from its XML string representation."""
    return atom.CreateClassFromXMLString(NicknameEntry, xml_string)


class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder):
    """A Google Apps Nickname feed flavor of an Atom Feed.

    Entries are parsed as NicknameEntry instances.
    """

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        gdata.GDataFeed.__init__(self, author=author, category=category,
                                 contributor=contributor, generator=generator,
                                 icon=icon, atom_id=atom_id, link=link,
                                 logo=logo, rights=rights, subtitle=subtitle,
                                 title=title, updated=updated, entry=entry,
                                 total_results=total_results,
                                 start_index=start_index,
                                 items_per_page=items_per_page,
                                 extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)


def NicknameFeedFromString(xml_string):
    """Parses a NicknameFeed instance from its XML string representation."""
    return atom.CreateClassFromXMLString(NicknameFeed, xml_string)
class UserEntry(gdata.GDataEntry):
    """A Google Apps flavor of an Atom Entry.

    Adds the <apps:login>, <apps:name>, <apps:quota>, <gd:who> and
    <gd:feedLink> children on top of the standard GData entry fields.
    """

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
    _children['{%s}name' % APPS_NAMESPACE] = ('name', Name)
    _children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota)
    # This child may already be defined in GDataEntry, confirm before removing.
    _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                         [gdata.FeedLink])
    _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 login=None, name=None, quota=None, who=None, feed_link=None,
                 extended_property=None,
                 extension_elements=None, extension_attributes=None, text=None):
        gdata.GDataEntry.__init__(self, author=author, category=category,
                                  content=content,
                                  atom_id=atom_id, link=link, published=published,
                                  title=title, updated=updated)
        self.login = login
        self.name = name
        self.quota = quota
        self.who = who
        self.feed_link = feed_link or []
        self.extended_property = extended_property or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def UserEntryFromString(xml_string):
    """Parses a UserEntry instance from its XML string representation."""
    return atom.CreateClassFromXMLString(UserEntry, xml_string)


class UserFeed(gdata.GDataFeed, gdata.LinkFinder):
    """A Google Apps User feed flavor of an Atom Feed.

    Entries are parsed as UserEntry instances.
    """

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        gdata.GDataFeed.__init__(self, author=author, category=category,
                                 contributor=contributor, generator=generator,
                                 icon=icon, atom_id=atom_id, link=link,
                                 logo=logo, rights=rights, subtitle=subtitle,
                                 title=title, updated=updated, entry=entry,
                                 total_results=total_results,
                                 start_index=start_index,
                                 items_per_page=items_per_page,
                                 extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)


def UserFeedFromString(xml_string):
    """Parses a UserFeed instance from its XML string representation."""
    return atom.CreateClassFromXMLString(UserFeed, xml_string)
class EmailListEntry(gdata.GDataEntry):
  """A Google Apps EmailList flavor of an Atom Entry."""

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList)
  # Might be able to remove this _children entry.
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               email_list=None, feed_link=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds an email-list entry; Atom basics are delegated upward."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published,
        title=title, updated=updated)
    self.email_list = email_list
    # Fresh containers per instance when the caller passes nothing/falsy.
    self.feed_link = feed_link if feed_link else []
    self.extended_property = extended_property if extended_property else []
    self.text = text
    self.extension_elements = extension_elements if extension_elements else []
    self.extension_attributes = (extension_attributes
                                 if extension_attributes else {})
def EmailListEntryFromString(xml_string):
  """Deserializes an Atom XML string into an EmailListEntry.

  Args:
    xml_string: string The XML of a single email-list entry.

  Returns:
    An EmailListEntry instance populated from xml_string.
  """
  return atom.CreateClassFromXMLString(EmailListEntry, xml_string)
class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Apps EmailList feed flavor of an Atom Feed."""

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Feed entries parse as EmailListEntry instances.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    # Pure pass-through: all state lives on the base feed class.
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link,
        logo=logo, rights=rights, subtitle=subtitle, title=title,
        updated=updated, entry=entry, total_results=total_results,
        start_index=start_index, items_per_page=items_per_page,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def EmailListFeedFromString(xml_string):
  """Deserializes an Atom XML string into an EmailListFeed.

  Args:
    xml_string: string The XML of an email-list feed.

  Returns:
    An EmailListFeed instance populated from xml_string.
  """
  return atom.CreateClassFromXMLString(EmailListFeed, xml_string)
class EmailListRecipientEntry(gdata.GDataEntry):
  """A Google Apps EmailListRecipient flavor of an Atom Entry."""

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # The recipient is expressed as a <gd:who> child element.
  _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               who=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds a recipient entry; Atom basics are delegated upward."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published,
        title=title, updated=updated)
    self.who = who
    # Fresh containers per instance when the caller passes nothing/falsy.
    self.extended_property = extended_property if extended_property else []
    self.text = text
    self.extension_elements = extension_elements if extension_elements else []
    self.extension_attributes = (extension_attributes
                                 if extension_attributes else {})
def EmailListRecipientEntryFromString(xml_string):
  """Deserializes an Atom XML string into an EmailListRecipientEntry.

  Args:
    xml_string: string The XML of a single recipient entry.

  Returns:
    An EmailListRecipientEntry instance populated from xml_string.
  """
  return atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string)
class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Apps EmailListRecipient feed flavor of an Atom Feed."""

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Feed entries parse as EmailListRecipientEntry instances.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [EmailListRecipientEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    # Pure pass-through: all state lives on the base feed class.
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link,
        logo=logo, rights=rights, subtitle=subtitle, title=title,
        updated=updated, entry=entry, total_results=total_results,
        start_index=start_index, items_per_page=items_per_page,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def EmailListRecipientFeedFromString(xml_string):
  """Deserializes an Atom XML string into an EmailListRecipientFeed.

  Args:
    xml_string: string The XML of a recipient feed.

  Returns:
    An EmailListRecipientFeed instance populated from xml_string.
  """
  return atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string)
class Property(atom.AtomBase):
  """The Google Apps Property element: a generic name/value pair."""

  _tag = 'property'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'
  _attributes['value'] = 'value'

  def __init__(self, name=None, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Stores the name/value pair plus generic extension state.

    NOTE(review): mirrors the original -- atom.AtomBase.__init__ is not
    invoked; all instance state is assigned directly here.
    """
    self.name = name
    self.value = value
    self.text = text
    self.extension_elements = extension_elements if extension_elements else []
    self.extension_attributes = (extension_attributes
                                 if extension_attributes else {})
def PropertyFromString(xml_string):
  """Deserializes an XML string into a Property element.

  Args:
    xml_string: string The XML of a single apps:property element.

  Returns:
    A Property instance populated from xml_string.
  """
  return atom.CreateClassFromXMLString(Property, xml_string)
class PropertyEntry(gdata.GDataEntry):
  """A Google Apps Property flavor of an Atom Entry."""

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # Each entry may carry a list of apps:property name/value pairs.
  _children['{%s}property' % APPS_NAMESPACE] = ('property', [Property])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               property=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds a property entry; Atom basics are delegated upward."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published,
        title=title, updated=updated)
    # 'property' shadows the builtin but is part of the public signature.
    self.property = property
    self.extended_property = extended_property if extended_property else []
    self.text = text
    self.extension_elements = extension_elements if extension_elements else []
    self.extension_attributes = (extension_attributes
                                 if extension_attributes else {})
def PropertyEntryFromString(xml_string):
  """Deserializes an Atom XML string into a PropertyEntry.

  Args:
    xml_string: string The XML of a single property entry.

  Returns:
    A PropertyEntry instance populated from xml_string.
  """
  return atom.CreateClassFromXMLString(PropertyEntry, xml_string)
class PropertyFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Apps Property feed flavor of an Atom Feed."""

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Feed entries parse as PropertyEntry instances.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PropertyEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    # Pure pass-through: all state lives on the base feed class.
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link,
        logo=logo, rights=rights, subtitle=subtitle, title=title,
        updated=updated, entry=entry, total_results=total_results,
        start_index=start_index, items_per_page=items_per_page,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def PropertyFeedFromString(xml_string):
  """Deserializes an Atom XML string into a PropertyFeed.

  Args:
    xml_string: string The XML of a property feed.

  Returns:
    A PropertyFeed instance populated from xml_string.
  """
  return atom.CreateClassFromXMLString(PropertyFeed, xml_string)

View File

@ -0,0 +1,16 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,471 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to set domain admin settings.
AdminSettingsService: Set admin settings."""
__author__ = 'jlee@pbu.edu'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
class AdminSettingsService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Admin Settings service."""
def _serviceUrl(self, setting_id, domain=None):
if domain is None:
domain = self.domain
return '/a/feeds/domain/%s/%s/%s' % (API_VER, domain, setting_id)
def genericGet(self, location):
"""Generic HTTP Get Wrapper
Args:
location: relative uri to Get
Returns:
A dict containing the result of the get operation."""
uri = self._serviceUrl(location)
try:
return self._GetProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def GetDefaultLanguage(self):
"""Gets Domain Default Language
Args:
None
Returns:
Default Language as a string. All possible values are listed at:
http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags"""
result = self.genericGet('general/defaultLanguage')
return result['defaultLanguage']
def UpdateDefaultLanguage(self, defaultLanguage):
"""Updates Domain Default Language
Args:
defaultLanguage: Domain Language to set
possible values are at:
http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags
Returns:
A dict containing the result of the put operation"""
uri = self._serviceUrl('general/defaultLanguage')
properties = {'defaultLanguage': defaultLanguage}
return self._PutProperties(uri, properties)
def GetOrganizationName(self):
"""Gets Domain Default Language
Args:
None
Returns:
Organization Name as a string."""
result = self.genericGet('general/organizationName')
return result['organizationName']
def UpdateOrganizationName(self, organizationName):
"""Updates Organization Name
Args:
organizationName: Name of organization
Returns:
A dict containing the result of the put operation"""
uri = self._serviceUrl('general/organizationName')
properties = {'organizationName': organizationName}
return self._PutProperties(uri, properties)
def GetMaximumNumberOfUsers(self):
"""Gets Maximum Number of Users Allowed
Args:
None
Returns: An integer, the maximum number of users"""
result = self.genericGet('general/maximumNumberOfUsers')
return int(result['maximumNumberOfUsers'])
def GetCurrentNumberOfUsers(self):
"""Gets Current Number of Users
Args:
None
Returns: An integer, the current number of users"""
result = self.genericGet('general/currentNumberOfUsers')
return int(result['currentNumberOfUsers'])
def IsDomainVerified(self):
"""Is the domain verified
Args:
None
Returns: Boolean, is domain verified"""
result = self.genericGet('accountInformation/isVerified')
if result['isVerified'] == 'true':
return True
else:
return False
def GetSupportPIN(self):
"""Gets Support PIN
Args:
None
Returns: A string, the Support PIN"""
result = self.genericGet('accountInformation/supportPIN')
return result['supportPIN']
def GetEdition(self):
"""Gets Google Apps Domain Edition
Args:
None
Returns: A string, the domain's edition (premier, education, partner)"""
result = self.genericGet('accountInformation/edition')
return result['edition']
def GetCustomerPIN(self):
"""Gets Customer PIN
Args:
None
Returns: A string, the customer PIN"""
result = self.genericGet('accountInformation/customerPIN')
return result['customerPIN']
def GetCreationTime(self):
"""Gets Domain Creation Time
Args:
None
Returns: A string, the domain's creation time"""
result = self.genericGet('accountInformation/creationTime')
return result['creationTime']
def GetCountryCode(self):
"""Gets Domain Country Code
Args:
None
Returns: A string, the domain's country code. Possible values at:
http://www.iso.org/iso/country_codes/iso_3166_code_lists/english_country_names_and_code_elements.htm"""
result = self.genericGet('accountInformation/countryCode')
return result['countryCode']
def GetAdminSecondaryEmail(self):
"""Gets Domain Admin Secondary Email Address
Args:
None
Returns: A string, the secondary email address for domain admin"""
result = self.genericGet('accountInformation/adminSecondaryEmail')
return result['adminSecondaryEmail']
def UpdateAdminSecondaryEmail(self, adminSecondaryEmail):
"""Gets Domain Creation Time
Args:
adminSecondaryEmail: string, secondary email address of admin
Returns: A dict containing the result of the put operation"""
uri = self._serviceUrl('accountInformation/adminSecondaryEmail')
properties = {'adminSecondaryEmail': adminSecondaryEmail}
return self._PutProperties(uri, properties)
def GetDomainLogo(self):
"""Gets Domain Logo
This function does not make use of the Google Apps Admin Settings API,
it does an HTTP Get of a url specific to the Google Apps domain. It is
included for completeness sake.
Args:
None
Returns: binary image file"""
import urllib
url = 'http://www.google.com/a/cpanel/'+self.domain+'/images/logo.gif'
response = urllib.urlopen(url)
return response.read()
def UpdateDomainLogo(self, logoImage):
"""Update Domain's Custom Logo
Args:
logoImage: binary image data
Returns: A dict containing the result of the put operation"""
from base64 import base64encode
uri = self._serviceUrl('appearance/customLogo')
properties = {'logoImage': base64encode(logoImage)}
return self._PutProperties(uri, properties)
def GetCNAMEVerificationStatus(self):
"""Gets Domain CNAME Verification Status
Args:
None
Returns: A dict {recordName, verified, verifiedMethod}"""
return self.genericGet('verification/cname')
def UpdateCNAMEVerificationStatus(self, verified):
"""Updates CNAME Verification Status
Args:
verified: boolean, True will retry verification process
Returns: A dict containing the result of the put operation"""
uri = self._serviceUrl('verification/cname')
properties = self.GetCNAMEVerificationStatus()
properties['verified'] = verified
return self._PutProperties(uri, properties)
def GetMXVerificationStatus(self):
"""Gets Domain MX Verification Status
Args:
None
Returns: A dict {verified, verifiedMethod}"""
return self.genericGet('verification/mx')
def UpdateMXVerificationStatus(self, verified):
"""Updates MX Verification Status
Args:
verified: boolean, True will retry verification process
Returns: A dict containing the result of the put operation"""
uri = self._serviceUrl('verification/mx')
properties = self.GetMXVerificationStatus()
properties['verified'] = verified
return self._PutProperties(uri, properties)
def GetSSOSettings(self):
"""Gets Domain Single Sign-On Settings
Args:
None
Returns: A dict {samlSignonUri, samlLogoutUri, changePasswordUri, enableSSO, ssoWhitelist, useDomainSpecificIssuer}"""
return self.genericGet('sso/general')
def UpdateSSOSettings(self, enableSSO=None, samlSignonUri=None,
samlLogoutUri=None, changePasswordUri=None,
ssoWhitelist=None, useDomainSpecificIssuer=None):
"""Update SSO Settings.
Args:
enableSSO: boolean, SSO Master on/off switch
samlSignonUri: string, SSO Login Page
samlLogoutUri: string, SSO Logout Page
samlPasswordUri: string, SSO Password Change Page
ssoWhitelist: string, Range of IP Addresses which will see SSO
useDomainSpecificIssuer: boolean, Include Google Apps Domain in Issuer
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('sso/general')
#Get current settings, replace Nones with ''
properties = self.GetSSOSettings()
if properties['samlSignonUri'] == None:
properties['samlSignonUri'] = ''
if properties['samlLogoutUri'] == None:
properties['samlLogoutUri'] = ''
if properties['changePasswordUri'] == None:
properties['changePasswordUri'] = ''
if properties['ssoWhitelist'] == None:
properties['ssoWhitelist'] = ''
#update only the values we were passed
if enableSSO != None:
properties['enableSSO'] = gdata.apps.service._bool2str(enableSSO)
if samlSignonUri != None:
properties['samlSignonUri'] = samlSignonUri
if samlLogoutUri != None:
properties['samlLogoutUri'] = samlLogoutUri
if changePasswordUri != None:
properties['changePasswordUri'] = changePasswordUri
if ssoWhitelist != None:
properties['ssoWhitelist'] = ssoWhitelist
if useDomainSpecificIssuer != None:
properties['useDomainSpecificIssuer'] = gdata.apps.service._bool2str(useDomainSpecificIssuer)
return self._PutProperties(uri, properties)
def GetSSOKey(self):
"""Gets Domain Single Sign-On Signing Key
Args:
None
Returns: A dict {modulus, exponent, algorithm, format}"""
return self.genericGet('sso/signingkey')
def UpdateSSOKey(self, signingKey):
"""Update SSO Settings.
Args:
signingKey: string, public key to be uploaded
Returns:
A dict containing the result of the update operation."""
uri = self._serviceUrl('sso/signingkey')
properties = {'signingKey': signingKey}
return self._PutProperties(uri, properties)
def IsUserMigrationEnabled(self):
"""Is User Migration Enabled
Args:
None
Returns:
boolean, is user migration enabled"""
result = self.genericGet('email/migration')
if result['enableUserMigration'] == 'true':
return True
else:
return False
def UpdateUserMigrationStatus(self, enableUserMigration):
"""Update User Migration Status
Args:
enableUserMigration: boolean, user migration enable/disable
Returns:
A dict containing the result of the update operation."""
uri = self._serviceUrl('email/migration')
properties = {'enableUserMigration': enableUserMigration}
return self._PutProperties(uri, properties)
def GetOutboundGatewaySettings(self):
"""Get Outbound Gateway Settings
Args:
None
Returns:
A dict {smartHost, smtpMode}"""
uri = self._serviceUrl('email/gateway')
try:
return self._GetProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
except TypeError:
#if no outbound gateway is set, we get a TypeError,
#catch it and return nothing...
return {'smartHost': None, 'smtpMode': None}
def UpdateOutboundGatewaySettings(self, smartHost=None, smtpMode=None):
"""Update Outbound Gateway Settings
Args:
smartHost: string, ip address or hostname of outbound gateway
smtpMode: string, SMTP or SMTP_TLS
Returns:
A dict containing the result of the update operation."""
uri = self._serviceUrl('email/gateway')
#Get current settings, replace Nones with ''
properties = GetOutboundGatewaySettings()
if properties['smartHost'] == None:
properties['smartHost'] = ''
if properties['smtpMode'] == None:
properties['smtpMode'] = ''
#If we were passed new values for smartHost or smtpMode, update them
if smartHost != None:
properties['smartHost'] = smartHost
if smtpMode != None:
properties['smtpMode'] = smtpMode
return self._PutProperties(uri, properties)
def AddEmailRoute(self, routeDestination, routeRewriteTo, routeEnabled, bounceNotifications, accountHandling):
"""Adds Domain Email Route
Args:
routeDestination: string, destination ip address or hostname
routeRewriteTo: boolean, rewrite smtp envelop To:
routeEnabled: boolean, enable disable email routing
bounceNotifications: boolean, send bound notificiations to sender
accountHandling: string, which to route, "allAccounts", "provisionedAccounts", "unknownAccounts"
Returns:
A dict containing the result of the update operation."""
uri = self._serviceUrl('emailrouting')
properties = {}
properties['routeDestination'] = routeDestination
properties['routeRewriteTo'] = gdata.apps.service._bool2str(routeRewriteTo)
properties['routeEnabled'] = gdata.apps.service._bool2str(routeEnabled)
properties['bounceNotifications'] = gdata.apps.service._bool2str(bounceNotifications)
properties['accountHandling'] = accountHandling
return self._PostProperties(uri, properties)

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,277 @@
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to audit user data.
AuditService: Set auditing."""
__author__ = 'jlee@pbu.edu'
from base64 import b64encode
import gdata.apps
import gdata.apps.service
import gdata.service
class AuditService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Audit service."""
def _serviceUrl(self, setting_id, domain=None, user=None):
if domain is None:
domain = self.domain
if user is None:
return '/a/feeds/compliance/audit/%s/%s' % (setting_id, domain)
else:
return '/a/feeds/compliance/audit/%s/%s/%s' % (setting_id, domain, user)
def updatePGPKey(self, pgpkey):
"""Updates Public PGP Key Google uses to encrypt audit data
Args:
pgpkey: string, ASCII text of PGP Public Key to be used
Returns:
A dict containing the result of the POST operation."""
uri = self._serviceUrl('publickey')
b64pgpkey = b64encode(pgpkey)
properties = {}
properties['publicKey'] = b64pgpkey
return self._PostProperties(uri, properties)
def createEmailMonitor(self, source_user, destination_user, end_date,
begin_date=None, incoming_headers_only=False,
outgoing_headers_only=False, drafts=False,
drafts_headers_only=False, chats=False,
chats_headers_only=False):
"""Creates a email monitor, forwarding the source_users emails/chats
Args:
source_user: string, the user whose email will be audited
destination_user: string, the user to receive the audited email
end_date: string, the date the audit will end in
"yyyy-MM-dd HH:mm" format, required
begin_date: string, the date the audit will start in
"yyyy-MM-dd HH:mm" format, leave blank to use current time
incoming_headers_only: boolean, whether to audit only the headers of
mail delivered to source user
outgoing_headers_only: boolean, whether to audit only the headers of
mail sent from the source user
drafts: boolean, whether to audit draft messages of the source user
drafts_headers_only: boolean, whether to audit only the headers of
mail drafts saved by the user
chats: boolean, whether to audit archived chats of the source user
chats_headers_only: boolean, whether to audit only the headers of
archived chats of the source user
Returns:
A dict containing the result of the POST operation."""
uri = self._serviceUrl('mail/monitor', user=source_user)
properties = {}
properties['destUserName'] = destination_user
if begin_date is not None:
properties['beginDate'] = begin_date
properties['endDate'] = end_date
if incoming_headers_only:
properties['incomingEmailMonitorLevel'] = 'HEADER_ONLY'
else:
properties['incomingEmailMonitorLevel'] = 'FULL_MESSAGE'
if outgoing_headers_only:
properties['outgoingEmailMonitorLevel'] = 'HEADER_ONLY'
else:
properties['outgoingEmailMonitorLevel'] = 'FULL_MESSAGE'
if drafts:
if drafts_headers_only:
properties['draftMonitorLevel'] = 'HEADER_ONLY'
else:
properties['draftMonitorLevel'] = 'FULL_MESSAGE'
if chats:
if chats_headers_only:
properties['chatMonitorLevel'] = 'HEADER_ONLY'
else:
properties['chatMonitorLevel'] = 'FULL_MESSAGE'
return self._PostProperties(uri, properties)
def getEmailMonitors(self, user):
""""Gets the email monitors for the given user
Args:
user: string, the user to retrieve email monitors for
Returns:
list results of the POST operation
"""
uri = self._serviceUrl('mail/monitor', user=user)
return self._GetPropertiesList(uri)
def deleteEmailMonitor(self, source_user, destination_user):
"""Deletes the email monitor for the given user
Args:
source_user: string, the user who is being monitored
destination_user: string, theuser who recieves the monitored emails
Returns:
Nothing
"""
uri = self._serviceUrl('mail/monitor', user=source_user+'/'+destination_user)
try:
return self._DeleteProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def createAccountInformationRequest(self, user):
"""Creates a request for account auditing details
Args:
user: string, the user to request account information for
Returns:
A dict containing the result of the post operation."""
uri = self._serviceUrl('account', user=user)
properties = {}
#XML Body is left empty
try:
return self._PostProperties(uri, properties)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def getAccountInformationRequestStatus(self, user, request_id):
"""Gets the status of an account auditing request
Args:
user: string, the user whose account auditing details were requested
request_id: string, the request_id
Returns:
A dict containing the result of the get operation."""
uri = self._serviceUrl('account', user=user+'/'+request_id)
try:
return self._GetProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def getAllAccountInformationRequestsStatus(self):
"""Gets the status of all account auditing requests for the domain
Args:
None
Returns:
list results of the POST operation
"""
uri = self._serviceUrl('account')
return self._GetPropertiesList(uri)
def deleteAccountInformationRequest(self, user, request_id):
"""Deletes the request for account auditing information
Args:
user: string, the user whose account auditing details were requested
request_id: string, the request_id
Returns:
Nothing
"""
uri = self._serviceUrl('account', user=user+'/'+request_id)
try:
return self._DeleteProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def createMailboxExportRequest(self, user, begin_date=None, end_date=None, include_deleted=False, search_query=None, headers_only=False):
"""Creates a mailbox export request
Args:
user: string, the user whose mailbox export is being requested
begin_date: string, date of earliest emails to export, optional, defaults to date of account creation
format is 'yyyy-MM-dd HH:mm'
end_date: string, date of latest emails to export, optional, defaults to current date
format is 'yyyy-MM-dd HH:mm'
include_deleted: boolean, whether to include deleted emails in export, mutually exclusive with search_query
search_query: string, gmail style search query, matched emails will be exported, mutually exclusive with include_deleted
Returns:
A dict containing the result of the post operation."""
uri = self._serviceUrl('mail/export', user=user)
properties = {}
if begin_date is not None:
properties['beginDate'] = begin_date
if end_date is not None:
properties['endDate'] = end_date
if include_deleted is not None:
properties['includeDeleted'] = gdata.apps.service._bool2str(include_deleted)
if search_query is not None:
properties['searchQuery'] = search_query
if headers_only is True:
properties['packageContent'] = 'HEADER_ONLY'
else:
properties['packageContent'] = 'FULL_MESSAGE'
return self._PostProperties(uri, properties)
def getMailboxExportRequestStatus(self, user, request_id):
"""Gets the status of an mailbox export request
Args:
user: string, the user whose mailbox were requested
request_id: string, the request_id
Returns:
A dict containing the result of the get operation."""
uri = self._serviceUrl('mail/export', user=user+'/'+request_id)
try:
return self._GetProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def getAllMailboxExportRequestsStatus(self):
"""Gets the status of all mailbox export requests for the domain
Args:
None
Returns:
list results of the POST operation
"""
uri = self._serviceUrl('mail/export')
return self._GetPropertiesList(uri)
def deleteMailboxExportRequest(self, user, request_id):
"""Deletes the request for mailbox export
Args:
user: string, the user whose mailbox were requested
request_id: string, the request_id
Returns:
Nothing
"""
uri = self._serviceUrl('mail/export', user=user+'/'+request_id)
try:
return self._DeleteProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])

View File

@ -0,0 +1,15 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,400 @@
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EmailSettingsClient simplifies Email Settings API calls.
EmailSettingsClient extends gdata.client.GDClient to ease interaction with
the Google Apps Email Settings API. These interactions include the ability
to create labels, filters, aliases, and update web-clip, forwarding, POP,
IMAP, vacation-responder, signature, language, and general settings.
"""
__author__ = 'Claudio Cherubino <ccherubino@google.com>'
import gdata.apps.emailsettings.data
import gdata.client
# Email Settings URI template
# The strings in this template are eventually replaced with the API version,
# Google Apps domain name, username, and settingID, respectively.
EMAIL_SETTINGS_URI_TEMPLATE = '/a/feeds/emailsettings/%s/%s/%s/%s'
# The settingID value for the label requests
SETTING_ID_LABEL = 'label'
# The settingID value for the filter requests
SETTING_ID_FILTER = 'filter'
# The settingID value for the send-as requests
SETTING_ID_SENDAS = 'sendas'
# The settingID value for the webclip requests
SETTING_ID_WEBCLIP = 'webclip'
# The settingID value for the forwarding requests
SETTING_ID_FORWARDING = 'forwarding'
# The settingID value for the POP requests
SETTING_ID_POP = 'pop'
# The settingID value for the IMAP requests
SETTING_ID_IMAP = 'imap'
# The settingID value for the vacation responder requests
SETTING_ID_VACATION_RESPONDER = 'vacation'
# The settingID value for the signature requests
SETTING_ID_SIGNATURE = 'signature'
# The settingID value for the language requests
SETTING_ID_LANGUAGE = 'language'
# The settingID value for the general requests
SETTING_ID_GENERAL = 'general'
# The KEEP action for the email settings
ACTION_KEEP = 'KEEP'
# The ARCHIVE action for the email settings
ACTION_ARCHIVE = 'ARCHIVE'
# The DELETE action for the email settings
ACTION_DELETE = 'DELETE'
# The ALL_MAIL setting for POP enable_for property
POP_ENABLE_FOR_ALL_MAIL = 'ALL_MAIL'
# The MAIL_FROM_NOW_ON setting for POP enable_for property
POP_ENABLE_FOR_MAIL_FROM_NOW_ON = 'MAIL_FROM_NOW_ON'
class EmailSettingsClient(gdata.client.GDClient):
    """Client extension for the Google Email Settings API service.

    Attributes:
      host: string The hostname for the Email Settings API service.
      api_version: string The version of the Email Settings API.
    """

    # Connection defaults for the Google Apps APIs endpoint.
    host = 'apps-apis.google.com'
    api_version = '2.0'
    auth_service = 'apps'
    # Scope strings are looked up from the shared gdata auth table.
    auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
    # All Email Settings requests are made over HTTPS.
    ssl = True

    def __init__(self, domain, auth_token=None, **kwargs):
        """Constructs a new client for the Email Settings API.

        Args:
          domain: string The Google Apps domain with Email Settings.
          auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
              OAuthToken which authorizes this client to edit the email settings.
          kwargs: The other parameters to pass to the gdata.client.GDClient
              constructor.
        """
        gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
        self.domain = domain

    def make_email_settings_uri(self, username, setting_id):
        """Creates the URI for the Email Settings API call.

        Using this client's Google Apps domain, create the URI to setup
        email settings for the given user in that domain. If params are
        provided, append them as GET params.

        Args:
          username: string The name of the user affected by this setting.
          setting_id: string The key of the setting to be configured.

        Returns:
          A string giving the URI for Email Settings API calls for this
          client's Google Apps domain.
        """
        uri = EMAIL_SETTINGS_URI_TEMPLATE % (self.api_version, self.domain,
                                             username, setting_id)
        return uri

    MakeEmailSettingsUri = make_email_settings_uri

    def create_label(self, username, name, **kwargs):
        """Creates a label with the given properties.

        Args:
          username: string The name of the user.
          name: string The name of the label.
          kwargs: The other parameters to pass to gdata.client.GDClient.post().

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsLabel of the new resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_LABEL)
        new_label = gdata.apps.emailsettings.data.EmailSettingsLabel(
            uri=uri, name=name)
        return self.post(new_label, uri, **kwargs)

    CreateLabel = create_label

    def create_filter(self, username, from_address=None,
                      to_address=None, subject=None, has_the_word=None,
                      does_not_have_the_word=None, has_attachments=None,
                      label=None, mark_as_read=None, archive=None, **kwargs):
        """Creates a filter with the given properties.

        Args:
          username: string The name of the user.
          from_address: string The source email address for the filter.
          to_address: string (optional) The destination email address for
              the filter.
          subject: string (optional) The value the email must have in its
              subject to be filtered.
          has_the_word: string (optional) The value the email must have
              in its subject or body to be filtered.
          does_not_have_the_word: string (optional) The value the email
              cannot have in its subject or body to be filtered.
          has_attachments: string (optional) A boolean string representing
              whether the email must have an attachment to be filtered.
          label: string (optional) The name of the label to apply to
              messages matching the filter criteria.
          mark_as_read: Boolean (optional) Whether or not to mark
              messages matching the filter criteria as read.
          archive: Boolean (optional) Whether or not to move messages
              matching to Archived state.
          kwargs: The other parameters to pass to gdata.client.GDClient.post().

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsFilter of the new resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_FILTER)
        new_filter = gdata.apps.emailsettings.data.EmailSettingsFilter(
            uri=uri, from_address=from_address,
            to_address=to_address, subject=subject,
            has_the_word=has_the_word,
            does_not_have_the_word=does_not_have_the_word,
            has_attachments=has_attachments, label=label,
            mark_as_read=mark_as_read, archive=archive)
        return self.post(new_filter, uri, **kwargs)

    CreateFilter = create_filter

    def create_send_as(self, username, name, address, reply_to=None,
                       make_default=None, **kwargs):
        """Creates a send-as alias with the given properties.

        Args:
          username: string The name of the user.
          name: string The name that will appear in the "From" field.
          address: string The email address that appears as the
              origination address for emails sent by this user.
          reply_to: string (optional) The address to be used as the reply-to
              address in email sent using the alias.
          make_default: Boolean (optional) Whether or not this alias should
              become the default alias for this user.
          kwargs: The other parameters to pass to gdata.client.GDClient.post().

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsSendAsAlias of the
          new resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_SENDAS)
        new_alias = gdata.apps.emailsettings.data.EmailSettingsSendAsAlias(
            uri=uri, name=name, address=address,
            reply_to=reply_to, make_default=make_default)
        return self.post(new_alias, uri, **kwargs)

    CreateSendAs = create_send_as

    def update_webclip(self, username, enable, **kwargs):
        """Enable/Disable Google Mail web clip.

        Args:
          username: string The name of the user.
          enable: Boolean Whether to enable showing Web clips.
          kwargs: The other parameters to pass to the update method.

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsWebClip of the
          updated resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_WEBCLIP)
        new_webclip = gdata.apps.emailsettings.data.EmailSettingsWebClip(
            uri=uri, enable=enable)
        return self.update(new_webclip, **kwargs)

    UpdateWebclip = update_webclip

    def update_forwarding(self, username, enable, forward_to=None,
                          action=None, **kwargs):
        """Update Google Mail Forwarding settings.

        Args:
          username: string The name of the user.
          enable: Boolean Whether to enable incoming email forwarding.
          forward_to: (optional) string The address email will be forwarded to.
          action: string (optional) The action to perform after forwarding
              an email (ACTION_KEEP, ACTION_ARCHIVE, ACTION_DELETE).
          kwargs: The other parameters to pass to the update method.

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsForwarding of the
          updated resource
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_FORWARDING)
        new_forwarding = gdata.apps.emailsettings.data.EmailSettingsForwarding(
            uri=uri, enable=enable, forward_to=forward_to, action=action)
        return self.update(new_forwarding, **kwargs)

    UpdateForwarding = update_forwarding

    def update_pop(self, username, enable, enable_for=None, action=None,
                   **kwargs):
        """Update Google Mail POP settings.

        Args:
          username: string The name of the user.
          enable: Boolean Whether to enable incoming POP3 access.
          enable_for: string (optional) Whether to enable POP3 for all mail
              (POP_ENABLE_FOR_ALL_MAIL), or mail from now on
              (POP_ENABLE_FOR_MAIL_FROM_NOW_ON).
          action: string (optional) What Google Mail should do with its copy
              of the email after it is retrieved using POP (ACTION_KEEP,
              ACTION_ARCHIVE, ACTION_DELETE).
          kwargs: The other parameters to pass to the update method.

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsPop of the updated resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_POP)
        new_pop = gdata.apps.emailsettings.data.EmailSettingsPop(
            uri=uri, enable=enable,
            enable_for=enable_for, action=action)
        return self.update(new_pop, **kwargs)

    UpdatePop = update_pop

    def update_imap(self, username, enable, **kwargs):
        """Update Google Mail IMAP settings.

        Args:
          username: string The name of the user.
          enable: Boolean Whether to enable IMAP access.
          kwargs: The other parameters to pass to the update method.

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsImap of the updated resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_IMAP)
        new_imap = gdata.apps.emailsettings.data.EmailSettingsImap(
            uri=uri, enable=enable)
        return self.update(new_imap, **kwargs)

    UpdateImap = update_imap

    def update_vacation(self, username, enable, subject=None, message=None,
                        contacts_only=None, **kwargs):
        """Update Google Mail vacation-responder settings.

        Args:
          username: string The name of the user.
          enable: Boolean Whether to enable the vacation responder.
          subject: string (optional) The subject line of the vacation responder
              autoresponse.
          message: string (optional) The message body of the vacation responder
              autoresponse.
          contacts_only: Boolean (optional) Whether to only send autoresponses
              to known contacts.
          kwargs: The other parameters to pass to the update method.

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsVacationResponder of the
          updated resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_VACATION_RESPONDER)
        new_vacation = gdata.apps.emailsettings.data.EmailSettingsVacationResponder(
            uri=uri, enable=enable, subject=subject,
            message=message, contacts_only=contacts_only)
        return self.update(new_vacation, **kwargs)

    UpdateVacation = update_vacation

    def update_signature(self, username, signature, **kwargs):
        """Update Google Mail signature.

        Args:
          username: string The name of the user.
          signature: string The signature to be appended to outgoing messages.
          kwargs: The other parameters to pass to the update method.

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsSignature of the
          updated resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_SIGNATURE)
        new_signature = gdata.apps.emailsettings.data.EmailSettingsSignature(
            uri=uri, signature=signature)
        return self.update(new_signature, **kwargs)

    UpdateSignature = update_signature

    def update_language(self, username, language, **kwargs):
        """Update Google Mail language settings.

        Args:
          username: string The name of the user.
          language: string The language tag for Google Mail's display language.
          kwargs: The other parameters to pass to the update method.

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsLanguage of the
          updated resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_LANGUAGE)
        new_language = gdata.apps.emailsettings.data.EmailSettingsLanguage(
            uri=uri, language=language)
        return self.update(new_language, **kwargs)

    UpdateLanguage = update_language

    def update_general_settings(self, username, page_size=None, shortcuts=None,
                                arrows=None, snippets=None, use_unicode=None,
                                **kwargs):
        """Update Google Mail general settings.

        Args:
          username: string The name of the user.
          page_size: int (optional) The number of conversations to be shown per
              page.
          shortcuts: Boolean (optional) Whether to enable keyboard shortcuts.
          arrows: Boolean (optional) Whether to display arrow-shaped personal
              indicators next to email sent specifically to the user.
          snippets: Boolean (optional) Whether to display snippets of the messages
              in the inbox and when searching.
          use_unicode: Boolean (optional) Whether to use UTF-8 (unicode) encoding
              for all outgoing messages.
          kwargs: The other parameters to pass to the update method.

        Returns:
          gdata.apps.emailsettings.data.EmailSettingsGeneral of the
          updated resource.
        """
        uri = self.MakeEmailSettingsUri(username=username,
                                        setting_id=SETTING_ID_GENERAL)
        new_general = gdata.apps.emailsettings.data.EmailSettingsGeneral(
            uri=uri, page_size=page_size, shortcuts=shortcuts,
            arrows=arrows, snippets=snippets, use_unicode=use_unicode)
        return self.update(new_general, **kwargs)

    UpdateGeneralSettings = update_general_settings

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,264 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to set users' email settings.
EmailSettingsService: Set various email settings.
"""
__author__ = 'google-apps-apis@googlegroups.com'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
# Forwarding and POP3 options
KEEP='KEEP'
ARCHIVE='ARCHIVE'
DELETE='DELETE'
ALL_MAIL='ALL_MAIL'
MAIL_FROM_NOW_ON='MAIL_FROM_NOW_ON'
class EmailSettingsService(gdata.apps.service.PropertyService):
    """Client for the Google Apps Email Settings service.

    Legacy (v1-style) property-based client: each call builds a dict of
    string properties and POSTs/PUTs it to the per-user settings URI.
    """

    def _serviceUrl(self, setting_id, username, domain=None):
        # Build the per-user settings URI; falls back to this service's
        # configured domain when none is supplied.
        if domain is None:
            domain = self.domain
        return '/a/feeds/emailsettings/%s/%s/%s/%s' % (API_VER, domain, username,
                                                       setting_id)

    def CreateLabel(self, username, label):
        """Create a label.

        Args:
          username: User to create label for.
          label: Label to create.

        Returns:
          A dict containing the result of the create operation.
        """
        uri = self._serviceUrl('label', username)
        properties = {'label': label}
        return self._PostProperties(uri, properties)

    def CreateFilter(self, username, from_=None, to=None, subject=None,
                     has_the_word=None, does_not_have_the_word=None,
                     has_attachment=None, label=None, should_mark_as_read=None,
                     should_archive=None):
        """Create a filter.

        Args:
          username: User to create filter for.
          from_: Filter from string.
          to: Filter to string.
          subject: Filter subject.
          has_the_word: Words to filter in.
          does_not_have_the_word: Words to filter out.
          has_attachment: Boolean for message having attachment.
          label: Label to apply.
          should_mark_as_read: Boolean for marking message as read.
          should_archive: Boolean for archiving message.

        Returns:
          A dict containing the result of the create operation.
        """
        uri = self._serviceUrl('filter', username)
        properties = {}
        properties['from'] = from_
        properties['to'] = to
        properties['subject'] = subject
        properties['hasTheWord'] = has_the_word
        properties['doesNotHaveTheWord'] = does_not_have_the_word
        properties['hasAttachment'] = gdata.apps.service._bool2str(has_attachment)
        properties['label'] = label
        properties['shouldMarkAsRead'] = gdata.apps.service._bool2str(should_mark_as_read)
        properties['shouldArchive'] = gdata.apps.service._bool2str(should_archive)
        return self._PostProperties(uri, properties)

    def CreateSendAsAlias(self, username, name, address, reply_to=None,
                          make_default=None):
        """Create alias to send mail as.

        Args:
          username: User to create alias for.
          name: Name of alias.
          address: Email address to send from.
          reply_to: Email address to reply to.
          make_default: Boolean for whether this is the new default sending alias.

        Returns:
          A dict containing the result of the create operation.
        """
        uri = self._serviceUrl('sendas', username)
        properties = {}
        properties['name'] = name
        properties['address'] = address
        properties['replyTo'] = reply_to
        properties['makeDefault'] = gdata.apps.service._bool2str(make_default)
        return self._PostProperties(uri, properties)

    def UpdateWebClipSettings(self, username, enable):
        """Update WebClip Settings.

        Args:
          username: User to update Web Clip setting for.
          enable: Boolean whether to enable Web Clip.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('webclip', username)
        properties = {}
        properties['enable'] = gdata.apps.service._bool2str(enable)
        return self._PutProperties(uri, properties)

    def UpdateForwarding(self, username, enable, forward_to=None, action=None):
        """Update forwarding settings.

        Args:
          username: User to update forwarding for.
          enable: Boolean whether to enable this forwarding rule.
          forward_to: Email address to forward to.
          action: Action to take after forwarding.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('forwarding', username)
        properties = {}
        properties['enable'] = gdata.apps.service._bool2str(enable)
        # Destination and action are only meaningful when forwarding is on.
        if enable is True:
            properties['forwardTo'] = forward_to
            properties['action'] = action
        return self._PutProperties(uri, properties)

    def UpdatePop(self, username, enable, enable_for=None, action=None):
        """Update POP3 settings.

        Args:
          username: User to update POP3 settings for.
          enable: Boolean whether to enable POP3.
          enable_for: Which messages to make available via POP3.
          action: Action to take after user retrieves email via POP3.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('pop', username)
        properties = {}
        properties['enable'] = gdata.apps.service._bool2str(enable)
        # Extra options are only sent when POP3 is being enabled.
        if enable is True:
            properties['enableFor'] = enable_for
            properties['action'] = action
        return self._PutProperties(uri, properties)

    def UpdateImap(self, username, enable):
        """Update IMAP settings.

        Args:
          username: User to update IMAP settings for.
          enable: Boolean whether to enable IMAP.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('imap', username)
        properties = {'enable': gdata.apps.service._bool2str(enable)}
        return self._PutProperties(uri, properties)

    def UpdateVacation(self, username, enable, subject=None, message=None,
                       contacts_only=None):
        """Update vacation settings.

        Args:
          username: User to update vacation settings for.
          enable: Boolean whether to enable vacation responses.
          subject: Vacation message subject.
          message: Vacation message body.
          contacts_only: Boolean whether to send message only to contacts.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('vacation', username)
        properties = {}
        properties['enable'] = gdata.apps.service._bool2str(enable)
        # Responder content is only sent when the responder is being enabled.
        if enable is True:
            properties['subject'] = subject
            properties['message'] = message
            properties['contactsOnly'] = gdata.apps.service._bool2str(contacts_only)
        return self._PutProperties(uri, properties)

    def UpdateSignature(self, username, signature):
        """Update signature.

        Args:
          username: User to update signature for.
          signature: Signature string.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('signature', username)
        properties = {'signature': signature}
        return self._PutProperties(uri, properties)

    def UpdateLanguage(self, username, language):
        """Update user interface language.

        Args:
          username: User to update language for.
          language: Language code.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('language', username)
        properties = {'language': language}
        return self._PutProperties(uri, properties)

    def UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None,
                      snippets=None, unicode=None):
        """Update general settings.

        NOTE(review): the `unicode` parameter shadows the Python 2 builtin;
        the name is kept for caller compatibility.

        Args:
          username: User to update general settings for.
          page_size: Number of messages to show.
          shortcuts: Boolean whether shortcuts are enabled.
          arrows: Boolean whether arrows are enabled.
          snippets: Boolean whether snippets are enabled.
          unicode: Whether unicode is enabled.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('general', username)
        properties = {}
        # Only explicitly-provided settings are included in the update.
        if page_size != None:
            properties['pageSize'] = str(page_size)
        if shortcuts != None:
            properties['shortcuts'] = gdata.apps.service._bool2str(shortcuts)
        if arrows != None:
            properties['arrows'] = gdata.apps.service._bool2str(arrows)
        if snippets != None:
            properties['snippets'] = gdata.apps.service._bool2str(snippets)
        if unicode != None:
            properties['unicode'] = gdata.apps.service._bool2str(unicode)
        return self._PutProperties(uri, properties)

View File

View File

@ -0,0 +1,387 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to manage groups, group members and group owners.
GroupsService: Provides methods to manage groups, members and owners.
"""
__author__ = 'google-apps-apis@googlegroups.com'
import urllib
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER = '2.0'
BASE_URL = '/a/feeds/group/' + API_VER + '/%s'
GROUP_MEMBER_URL = BASE_URL + '?member=%s'
GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s'
GROUP_ID_URL = BASE_URL + '/%s'
MEMBER_URL = BASE_URL + '/%s/member'
MEMBER_WITH_SUSPENDED_URL = MEMBER_URL + '?includeSuspendedUsers=%s'
MEMBER_ID_URL = MEMBER_URL + '/%s'
OWNER_URL = BASE_URL + '/%s/owner'
OWNER_WITH_SUSPENDED_URL = OWNER_URL + '?includeSuspendedUsers=%s'
OWNER_ID_URL = OWNER_URL + '/%s'
PERMISSION_OWNER = 'Owner'
PERMISSION_MEMBER = 'Member'
PERMISSION_DOMAIN = 'Domain'
PERMISSION_ANYONE = 'Anyone'
class GroupsService(gdata.apps.service.PropertyService):
    """Client for the Google Apps Groups service."""

    def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email,
                    direct_only=False, domain=None, suspended_users=False):
        # Build the URI for a 'group', 'member' or 'owner' request. Returns
        # None implicitly if service_type is none of the three (no final
        # return statement).
        if domain is None:
            domain = self.domain
        if service_type == 'group':
            if group_id != '' and is_existed:
                return GROUP_ID_URL % (domain, group_id)
            elif member_id != '':
                if direct_only:
                    return GROUP_MEMBER_DIRECT_URL % (domain, urllib.quote_plus(member_id),
                                                      self._Bool2Str(direct_only))
                else:
                    return GROUP_MEMBER_URL % (domain, urllib.quote_plus(member_id))
            else:
                return BASE_URL % (domain)
        if service_type == 'member':
            if member_id != '' and is_existed:
                return MEMBER_ID_URL % (domain, group_id, urllib.quote_plus(member_id))
            elif suspended_users:
                return MEMBER_WITH_SUSPENDED_URL % (domain, group_id,
                                                    self._Bool2Str(suspended_users))
            else:
                return MEMBER_URL % (domain, group_id)
        if service_type == 'owner':
            if owner_email != '' and is_existed:
                return OWNER_ID_URL % (domain, group_id, urllib.quote_plus(owner_email))
            elif suspended_users:
                return OWNER_WITH_SUSPENDED_URL % (domain, group_id,
                                                   self._Bool2Str(suspended_users))
            else:
                return OWNER_URL % (domain, group_id)

    def _Bool2Str(self, b):
        # API expects lowercase 'true'/'false'; None passes through unchanged.
        if b is None:
            return None
        return str(b is True).lower()

    def _IsExisted(self, uri):
        # Probe the URI; a 'does not exist' API error means False, any other
        # error is re-raised.
        try:
            self._GetProperties(uri)
            return True
        except gdata.apps.service.AppsForYourDomainException, e:
            if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST:
                return False
            else:
                raise e

    def CreateGroup(self, group_id, group_name, description, email_permission):
        """Create a group.

        Args:
          group_id: The ID of the group (e.g. us-sales).
          group_name: The name of the group.
          description: A description of the group.
          email_permission: The subscription permission of the group.

        Returns:
          A dict containing the result of the create operation.
        """
        uri = self._ServiceUrl('group', False, group_id, '', '')
        properties = {}
        properties['groupId'] = group_id
        properties['groupName'] = group_name
        properties['description'] = description
        properties['emailPermission'] = email_permission
        return self._PostProperties(uri, properties)

    def UpdateGroup(self, group_id, group_name, description, email_permission):
        """Update a group's name, description and/or permission.

        Args:
          group_id: The ID of the group (e.g. us-sales).
          group_name: The name of the group.
          description: A description of the group.
          email_permission: The subscription permission of the group.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._ServiceUrl('group', True, group_id, '', '')
        properties = {}
        properties['groupId'] = group_id
        properties['groupName'] = group_name
        properties['description'] = description
        properties['emailPermission'] = email_permission
        return self._PutProperties(uri, properties)

    def RetrieveGroup(self, group_id):
        """Retrieve a group based on its ID.

        Args:
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          A dict containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('group', True, group_id, '', '')
        return self._GetProperties(uri)

    def RetrieveAllGroups(self):
        """Retrieve all groups in the domain.

        Returns:
          A list containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('group', True, '', '', '')
        return self._GetPropertiesList(uri)

    def RetrievePageOfGroups(self, start_group=None):
        """Retrieve one page of groups in the domain.

        Args:
          start_group: The key to continue for pagination through all groups.

        Returns:
          A feed object containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('group', True, '', '', '')
        if start_group is not None:
            uri += "?start="+start_group
        property_feed = self._GetPropertyFeed(uri)
        return property_feed

    def RetrieveGroups(self, member_id, direct_only=False):
        """Retrieve all groups that belong to the given member_id.

        Args:
          member_id: The member's email address (e.g. member@example.com).
          direct_only: Boolean whether only return groups that this member
              directly belongs to.

        Returns:
          A list containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('group', True, '', member_id, '', direct_only=direct_only)
        return self._GetPropertiesList(uri)

    def DeleteGroup(self, group_id):
        """Delete a group based on its ID.

        Args:
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          A dict containing the result of the delete operation.
        """
        uri = self._ServiceUrl('group', True, group_id, '', '')
        return self._DeleteProperties(uri)

    def AddMemberToGroup(self, member_id, group_id):
        """Add a member to a group.

        Args:
          member_id: The member's email address (e.g. member@example.com).
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          A dict containing the result of the add operation.
        """
        uri = self._ServiceUrl('member', False, group_id, member_id, '')
        properties = {}
        properties['memberId'] = member_id
        return self._PostProperties(uri, properties)

    def IsMember(self, member_id, group_id):
        """Check whether the given member already exists in the given group.

        Args:
          member_id: The member's email address (e.g. member@example.com).
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          True if the member exists in the group. False otherwise.
        """
        uri = self._ServiceUrl('member', True, group_id, member_id, '')
        return self._IsExisted(uri)

    def RetrieveMember(self, member_id, group_id):
        """Retrieve the given member in the given group.

        Args:
          member_id: The member's email address (e.g. member@example.com).
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          A dict containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('member', True, group_id, member_id, '')
        return self._GetProperties(uri)

    def RetrieveAllMembers(self, group_id, suspended_users=False):
        """Retrieve all members in the given group.

        Args:
          group_id: The ID of the group (e.g. us-sales).
          suspended_users: A boolean; should we include any suspended users in
              the membership list returned?

        Returns:
          A list containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('member', True, group_id, '', '',
                               suspended_users=suspended_users)
        return self._GetPropertiesList(uri)

    def RetrievePageOfMembers(self, group_id, suspended_users=False, start=None):
        """Retrieve one page of members of a given group.

        Args:
          group_id: The ID of the group (e.g. us-sales).
          suspended_users: A boolean; should we include any suspended users in
              the membership list returned?
          start: The key to continue for pagination through all members.

        Returns:
          A feed object containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('member', True, group_id, '', '',
                               suspended_users=suspended_users)
        if start is not None:
            # The suspended_users flag already opened the query string with '?'.
            if suspended_users:
                uri += "&start="+start
            else:
                uri += "?start="+start
        property_feed = self._GetPropertyFeed(uri)
        return property_feed

    def RemoveMemberFromGroup(self, member_id, group_id):
        """Remove the given member from the given group.

        Args:
          member_id: The member's email address (e.g. member@example.com).
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          A dict containing the result of the remove operation.
        """
        uri = self._ServiceUrl('member', True, group_id, member_id, '')
        return self._DeleteProperties(uri)

    def AddOwnerToGroup(self, owner_email, group_id):
        """Add an owner to a group.

        Args:
          owner_email: The email address of a group owner.
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          A dict containing the result of the add operation.
        """
        uri = self._ServiceUrl('owner', False, group_id, '', owner_email)
        properties = {}
        properties['email'] = owner_email
        return self._PostProperties(uri, properties)

    def IsOwner(self, owner_email, group_id):
        """Check whether the given member is an owner of the given group.

        Args:
          owner_email: The email address of a group owner.
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          True if the member is an owner of the given group. False otherwise.
        """
        uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
        return self._IsExisted(uri)

    def RetrieveOwner(self, owner_email, group_id):
        """Retrieve the given owner in the given group.

        Args:
          owner_email: The email address of a group owner.
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          A dict containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
        return self._GetProperties(uri)

    def RetrieveAllOwners(self, group_id, suspended_users=False):
        """Retrieve all owners of the given group.

        Args:
          group_id: The ID of the group (e.g. us-sales).
          suspended_users: A boolean; should we include any suspended users in
              the ownership list returned?

        Returns:
          A list containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('owner', True, group_id, '', '',
                               suspended_users=suspended_users)
        return self._GetPropertiesList(uri)

    def RetrievePageOfOwners(self, group_id, suspended_users=False, start=None):
        """Retrieve one page of owners of the given group.

        Args:
          group_id: The ID of the group (e.g. us-sales).
          suspended_users: A boolean; should we include any suspended users in
              the ownership list returned?
          start: The key to continue for pagination through all owners.

        Returns:
          A feed object containing the result of the retrieve operation.
        """
        uri = self._ServiceUrl('owner', True, group_id, '', '',
                               suspended_users=suspended_users)
        if start is not None:
            # The suspended_users flag already opened the query string with '?'.
            if suspended_users:
                uri += "&start="+start
            else:
                uri += "?start="+start
        property_feed = self._GetPropertyFeed(uri)
        return property_feed

    def RemoveOwnerFromGroup(self, owner_email, group_id):
        """Remove the given owner from the given group.

        Args:
          owner_email: The email address of a group owner.
          group_id: The ID of the group (e.g. us-sales).

        Returns:
          A dict containing the result of the remove operation.
        """
        uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
        return self._DeleteProperties(uri)

View File

@ -0,0 +1,212 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains objects used with Google Apps."""
__author__ = 'google-apps-apis@googlegroups.com'
import atom
import gdata
# XML namespaces which are often used in Google Apps entity.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
class Rfc822Msg(atom.AtomBase):
    """The apps:rfc822Msg element used by the Migration API.

    Carries an RFC 822 message body; the payload is always marked as
    base64-encoded via the `encoding` attribute.
    """

    _tag = 'rfc822Msg'
    _namespace = APPS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['encoding'] = 'encoding'

    def __init__(self, extension_elements=None,
                 extension_attributes=None, text=None):
        # Payload transfer encoding is fixed to base64.
        self.encoding = 'base64'
        self.text = text
        # Falsy arguments fall back to fresh containers, matching the
        # upstream gdata convention.
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def Rfc822MsgFromString(xml_string):
    """Deserialize an Rfc822Msg instance from its XML string form."""
    parsed_msg = atom.CreateClassFromXMLString(Rfc822Msg, xml_string)
    return parsed_msg
class MailItemProperty(atom.AtomBase):
    """The apps:mailItemProperty element used by the Migration API.

    Holds a single named property of a migrated mail item in its `value`
    attribute.
    """

    _tag = 'mailItemProperty'
    _namespace = APPS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.text = text
        self.value = value
        # Falsy arguments fall back to fresh containers, matching the
        # upstream gdata convention.
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def MailItemPropertyFromString(xml_string):
  """Parse a MailItemProperty instance from its XML representation."""
  return atom.CreateClassFromXMLString(MailItemProperty, xml_string)
class Label(atom.AtomBase):
  """The Migration label element.

  Wraps an apps:label element; the 'labelName' XML attribute names a
  Gmail label to apply to the migrated message.
  """
  _tag = 'label'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['labelName'] = 'label_name'

  def __init__(self, label_name=None,
               extension_elements=None, extension_attributes=None,
               text=None):
    """Records the label name; extension containers default to empty."""
    self.text = text
    self.label_name = label_name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def LabelFromString(xml_string):
  """Parse a Label instance from its XML representation."""
  return atom.CreateClassFromXMLString(Label, xml_string)
class MailEntry(gdata.GDataEntry):
  """A Google Migration flavor of an Atom Entry.

  Extends GDataEntry with the migration-specific children: the raw
  rfc822Msg payload, zero or more mailItemProperty elements, and zero
  or more label elements.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # Single child: the message payload.
  _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
  # List-valued children (note the [Class] form marks them repeatable).
  _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
                                                        [MailItemProperty])
  _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               rfc822_msg=None, mail_item_property=None, label=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content,
                              atom_id=atom_id, link=link, published=published,
                              title=title, updated=updated)
    self.rfc822_msg = rfc822_msg
    # NOTE(review): unlike BatchMailEntry, these are stored as passed in
    # (possibly None) rather than defaulting to [] — confirm callers cope.
    self.mail_item_property = mail_item_property
    self.label = label
    self.extended_property = extended_property or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def MailEntryFromString(xml_string):
  """Parse a MailEntry instance from its XML representation."""
  return atom.CreateClassFromXMLString(MailEntry, xml_string)
class BatchMailEntry(gdata.BatchEntry):
  """A Google Migration flavor of a batch Atom Entry.

  Same migration-specific children as MailEntry, but built on
  gdata.BatchEntry so it can carry batch operation/id/status metadata.
  """
  _tag = gdata.BatchEntry._tag
  _namespace = gdata.BatchEntry._namespace
  _children = gdata.BatchEntry._children.copy()
  _attributes = gdata.BatchEntry._attributes.copy()
  # Single child: the message payload.
  _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
  # List-valued children (the [Class] form marks them repeatable).
  _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
                                                        [MailItemProperty])
  _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               rfc822_msg=None, mail_item_property=None, label=None,
               batch_operation=None, batch_id=None, batch_status=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.BatchEntry.__init__(self, author=author, category=category,
                              content=content,
                              atom_id=atom_id, link=link, published=published,
                              batch_operation=batch_operation,
                              batch_id=batch_id, batch_status=batch_status,
                              title=title, updated=updated)
    # 'or None' is redundant (a falsy rfc822_msg already yields None-ish);
    # kept as-is for byte-identical behavior.
    self.rfc822_msg = rfc822_msg or None
    self.mail_item_property = mail_item_property or []
    self.label = label or []
    self.extended_property = extended_property or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def BatchMailEntryFromString(xml_string):
  """Parse a BatchMailEntry instance from its XML representation."""
  return atom.CreateClassFromXMLString(BatchMailEntry, xml_string)
class BatchMailEventFeed(gdata.BatchFeed):
  """A Migration event feed flavor of an Atom Feed.

  Identical to gdata.BatchFeed except that its atom:entry children are
  parsed as BatchMailEntry instead of the generic batch entry class.
  """
  _tag = gdata.BatchFeed._tag
  _namespace = gdata.BatchFeed._namespace
  _children = gdata.BatchFeed._children.copy()
  _attributes = gdata.BatchFeed._attributes.copy()
  # Override the entry child so feed entries deserialize as BatchMailEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchMailEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, interrupted=None, extension_elements=None,
               extension_attributes=None, text=None):
    # Pure pass-through constructor; all state lives in gdata.BatchFeed.
    gdata.BatchFeed.__init__(self, author=author, category=category,
                             contributor=contributor, generator=generator,
                             icon=icon, atom_id=atom_id, link=link,
                             logo=logo, rights=rights, subtitle=subtitle,
                             title=title, updated=updated, entry=entry,
                             total_results=total_results,
                             start_index=start_index,
                             items_per_page=items_per_page,
                             interrupted=interrupted,
                             extension_elements=extension_elements,
                             extension_attributes=extension_attributes,
                             text=text)
def BatchMailEventFeedFromString(xml_string):
  """Parse a BatchMailEventFeed instance from its XML representation."""
  return atom.CreateClassFromXMLString(BatchMailEventFeed, xml_string)

View File

@ -0,0 +1,129 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the methods to import mail via Google Apps Email Migration API.
MigrationService: Provides methods to import mail.
"""
__author__ = 'google-apps-apis@googlegroups.com'
import base64
import gdata
import gdata.apps.service
import gdata.service
from gdata.apps import migration
API_VER = '2.0'
class MigrationService(gdata.apps.service.AppsService):
  """Client for the EMAPI migration service. Use either ImportMail to import
  one message at a time, or AddBatchEntry and SubmitBatch to import a batch of
  messages at a time.
  """

  def __init__(self, email=None, password=None, domain=None, source=None,
               server='apps-apis.google.com', additional_headers=None):
    gdata.apps.service.AppsService.__init__(
        self, email=email, password=password, domain=domain, source=source,
        server=server, additional_headers=additional_headers)
    # Accumulates entries added via AddBatchEntry until SubmitBatch is called.
    self.mail_batch = migration.BatchMailEventFeed()

  def _BaseURL(self):
    # Root of this domain's migration feed.
    return '/a/feeds/migration/%s/%s' % (API_VER, self.domain)

  def ImportMail(self, user_name, mail_message, mail_item_properties,
                 mail_labels):
    """Import a single mail message.

    Args:
      user_name: The username to import messages to.
      mail_message: An RFC822 format email message (string).
      mail_item_properties: A list of Gmail properties to apply to the message.
      mail_labels: A list of labels to apply to the message.

    Returns:
      A MailEntry representing the successfully imported message.

    Raises:
      AppsForYourDomainException: An error occurred importing the message.
    """
    uri = '%s/%s/mail' % (self._BaseURL(), user_name)
    mail_entry = migration.MailEntry()
    # The raw message is base64-encoded for transport.
    mail_entry.rfc822_msg = migration.Rfc822Msg(text=(base64.b64encode(
        mail_message)))
    mail_entry.rfc822_msg.encoding = 'base64'
    mail_entry.mail_item_property = map(
        lambda x: migration.MailItemProperty(value=x), mail_item_properties)
    mail_entry.label = map(lambda x: migration.Label(label_name=x),
                           mail_labels)
    try:
      return migration.MailEntryFromString(str(self.Post(mail_entry, uri)))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])

  def AddBatchEntry(self, mail_message, mail_item_properties,
                    mail_labels):
    """Add a message to the current batch that you later will submit.

    Args:
      mail_message: An RFC822 format email message (string).
      mail_item_properties: A list of Gmail properties to apply to the message.
      mail_labels: A list of labels to apply to the message.

    Returns:
      The length of the serialized MailEntry for the message, so callers
      can track the approximate size of the pending batch.
    """
    mail_entry = migration.BatchMailEntry()
    mail_entry.rfc822_msg = migration.Rfc822Msg(text=(base64.b64encode(
        mail_message)))
    mail_entry.rfc822_msg.encoding = 'base64'
    mail_entry.mail_item_property = map(
        lambda x: migration.MailItemProperty(value=x), mail_item_properties)
    mail_entry.label = map(lambda x: migration.Label(label_name=x),
                           mail_labels)
    self.mail_batch.AddBatchEntry(mail_entry)
    return len(str(mail_entry))

  def SubmitBatch(self, user_name):
    """Send all the mail items you have added to the batch to the server.

    Args:
      user_name: The username to import messages to.

    Returns:
      The parsed BatchMailEventFeed response from the web service call.

    Raises:
      AppsForYourDomainException: An error occurred importing the batch.
    """
    uri = '%s/%s/mail/batch' % (self._BaseURL(), user_name)
    try:
      self.result = self.Post(self.mail_batch, uri,
                              converter=migration.BatchMailEventFeedFromString)
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
    # Start a fresh batch for subsequent AddBatchEntry calls.
    self.mail_batch = migration.BatchMailEventFeed()
    return self.result

View File

@ -0,0 +1,297 @@
#!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to manage organization unit and organization user.
OrganizationService: Provides methods to manage organization unit and organization user.
"""
__author__ = 'Alexandre Vivien (alex@simplecode.fr)'
import gdata.apps
import gdata.apps.service
import gdata.service
# Organization API version used in request URLs.
API_VER = '2.0'
CUSTOMER_BASE_URL = '/a/feeds/customer/2.0/customerId'
# Org-unit feed URLs. The first %s slot is the customer_id; where a second
# %s appears it is the (URL-encoded) org unit path.
BASE_UNIT_URL = '/a/feeds/orgunit/' + API_VER + '/%s'
UNIT_URL = BASE_UNIT_URL + '/%s'
UNIT_ALL_URL = BASE_UNIT_URL + '?get=all'
UNIT_CHILD_URL = BASE_UNIT_URL + '?get=children&orgUnitPath=%s'
# Org-user feed URLs. The first %s slot is the customer_id; the second (when
# present) is a user email or an org unit path depending on the feed.
BASE_USER_URL = '/a/feeds/orguser/' + API_VER + '/%s'
USER_URL = BASE_USER_URL + '/%s'
USER_ALL_URL = BASE_USER_URL + '?get=all'
USER_CHILD_URL = BASE_USER_URL + '?get=children&orgUnitPath=%s'
class OrganizationService(gdata.apps.service.PropertyService):
  """Client for the Google Apps Organizations service."""

  def _Bool2Str(self, b):
    """Convert a Python boolean to the API's 'true'/'false' string.

    None is passed through unchanged so optional parameters can be omitted.
    """
    if b is None:
      return None
    return str(b is True).lower()

  def RetrieveCustomerId(self):
    """Retrieve the Customer ID for the account of the authenticated
    administrator making this request.

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = CUSTOMER_BASE_URL
    return self._GetProperties(uri)

  def CreateOrgUnit(self, customer_id, name, parent_org_unit_path='/', description='', block_inheritance=False):
    """Create a Organization Unit.

    Args:
      customer_id: The ID of the Google Apps customer.
      name: The simple organization unit text name, not the full path name.
      parent_org_unit_path: The full path of the parental tree to this organization unit (default: '/').
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
      description: The human readable text description of the organization unit (optional).
      block_inheritance: This parameter blocks policy setting inheritance
        from organization units higher in the organization tree (default: False).

    Returns:
      A dict containing the result of the create operation.
    """
    uri = BASE_UNIT_URL % (customer_id)
    properties = {}
    properties['name'] = name
    properties['parentOrgUnitPath'] = parent_org_unit_path
    properties['description'] = description
    properties['blockInheritance'] = self._Bool2Str(block_inheritance)
    return self._PostProperties(uri, properties)

  def UpdateOrgUnit(self, customer_id, org_unit_path, name=None, parent_org_unit_path=None,
                    description=None, block_inheritance=None):
    """Update a Organization Unit.

    Only the parameters that are not None are sent to the server; pass
    block_inheritance=False explicitly to turn inheritance blocking off.

    Args:
      customer_id: The ID of the Google Apps customer.
      org_unit_path: The organization's full path name.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
      name: The simple organization unit text name, not the full path name.
      parent_org_unit_path: The full path of the parental tree to this organization unit.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
      description: The human readable text description of the organization unit.
      block_inheritance: This parameter blocks policy setting inheritance
        from organization units higher in the organization tree.

    Returns:
      A dict containing the result of the update operation.
    """
    uri = UNIT_URL % (customer_id, org_unit_path)
    properties = {}
    if name:
      properties['name'] = name
    if parent_org_unit_path:
      properties['parentOrgUnitPath'] = parent_org_unit_path
    if description:
      properties['description'] = description
    # Bug fix: test against None, not truthiness, so that an explicit
    # block_inheritance=False is serialized as 'false' instead of being
    # silently dropped (None is the "leave unchanged" sentinel here,
    # which is exactly what _Bool2Str is built to distinguish).
    if block_inheritance is not None:
      properties['blockInheritance'] = self._Bool2Str(block_inheritance)
    return self._PutProperties(uri, properties)

  def MoveUserToOrgUnit(self, customer_id, org_unit_path, users_to_move):
    """Move a user to an Organization Unit.

    Args:
      customer_id: The ID of the Google Apps customer.
      org_unit_path: The organization's full path name.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
      users_to_move: Email addresses list of users to move. Note: You can move a maximum of 25 users at one time.

    Returns:
      A dict containing the result of the update operation.
    """
    uri = UNIT_URL % (customer_id, org_unit_path)
    properties = {}
    if users_to_move and isinstance(users_to_move, list):
      properties['usersToMove'] = ', '.join(users_to_move)
    return self._PutProperties(uri, properties)

  def RetrieveOrgUnit(self, customer_id, org_unit_path):
    """Retrieve a Orgunit based on its path.

    Args:
      customer_id: The ID of the Google Apps customer.
      org_unit_path: The organization's full path name.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = UNIT_URL % (customer_id, org_unit_path)
    return self._GetProperties(uri)

  def DeleteOrgUnit(self, customer_id, org_unit_path):
    """Delete a Orgunit based on its path.

    Args:
      customer_id: The ID of the Google Apps customer.
      org_unit_path: The organization's full path name.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)

    Returns:
      A dict containing the result of the delete operation.
    """
    uri = UNIT_URL % (customer_id, org_unit_path)
    return self._DeleteProperties(uri)

  def RetrieveAllOrgUnits(self, customer_id):
    """Retrieve all OrgUnits in the customer's domain.

    Args:
      customer_id: The ID of the Google Apps customer.

    Returns:
      A list containing the result of the retrieve operation.
    """
    uri = UNIT_ALL_URL % (customer_id)
    return self._GetPropertiesList(uri)

  def RetrievePageOfOrgUnits(self, customer_id, startKey=None):
    """Retrieve one page of OrgUnits in the customer's domain.

    Args:
      customer_id: The ID of the Google Apps customer.
      startKey: The key to continue for pagination through all OrgUnits.

    Returns:
      A feed object containing the result of the retrieve operation.
    """
    uri = UNIT_ALL_URL % (customer_id)
    if startKey is not None:
      uri += "&startKey=" + startKey
    property_feed = self._GetPropertyFeed(uri)
    return property_feed

  def RetrieveSubOrgUnits(self, customer_id, org_unit_path):
    """Retrieve all Sub-OrgUnits of the provided OrgUnit.

    Args:
      customer_id: The ID of the Google Apps customer.
      org_unit_path: The organization's full path name.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)

    Returns:
      A list containing the result of the retrieve operation.
    """
    uri = UNIT_CHILD_URL % (customer_id, org_unit_path)
    return self._GetPropertiesList(uri)

  def RetrieveOrgUser(self, customer_id, user_email):
    """Retrieve the OrgUnit of the user.

    Args:
      customer_id: The ID of the Google Apps customer.
      user_email: The email address of the user.

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = USER_URL % (customer_id, user_email)
    return self._GetProperties(uri)

  def UpdateOrgUser(self, customer_id, user_email, org_unit_path):
    """Update the OrgUnit of a OrgUser.

    Args:
      customer_id: The ID of the Google Apps customer.
      user_email: The email address of the user.
      org_unit_path: The new organization's full path name.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)

    Returns:
      A dict containing the result of the update operation.
    """
    uri = USER_URL % (customer_id, user_email)
    properties = {}
    if org_unit_path:
      properties['orgUnitPath'] = org_unit_path
    return self._PutProperties(uri, properties)

  def RetrieveAllOrgUsers(self, customer_id):
    """Retrieve all OrgUsers in the customer's domain.

    Args:
      customer_id: The ID of the Google Apps customer.

    Returns:
      A list containing the result of the retrieve operation.
    """
    uri = USER_ALL_URL % (customer_id)
    return self._GetPropertiesList(uri)

  def RetrievePageOfOrgUsers(self, customer_id, startKey=None):
    """Retrieve one page of OrgUsers in the customer's domain.

    Args:
      customer_id: The ID of the Google Apps customer.
      startKey: The key to continue for pagination through all OrgUsers.

    Returns:
      A feed object containing the result of the retrieve operation.
    """
    uri = USER_ALL_URL % (customer_id)
    if startKey is not None:
      uri += "&startKey=" + startKey
    property_feed = self._GetPropertyFeed(uri)
    return property_feed

  def RetrieveOrgUnitUsers(self, customer_id, org_unit_path):
    """Retrieve all OrgUsers of the provided OrgUnit.

    Args:
      customer_id: The ID of the Google Apps customer.
      org_unit_path: The organization's full path name.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)

    Returns:
      A list containing the result of the retrieve operation.
    """
    uri = USER_CHILD_URL % (customer_id, org_unit_path)
    return self._GetPropertiesList(uri)

  def RetrieveOrgUnitPageOfUsers(self, customer_id, org_unit_path, startKey=None):
    """Retrieve one page of OrgUsers of the provided OrgUnit.

    Args:
      customer_id: The ID of the Google Apps customer.
      org_unit_path: The organization's full path name.
        Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
      startKey: The key to continue for pagination through all OrgUsers.

    Returns:
      A feed object containing the result of the retrieve operation.
    """
    uri = USER_CHILD_URL % (customer_id, org_unit_path)
    if startKey is not None:
      uri += "&startKey=" + startKey
    property_feed = self._GetPropertyFeed(uri)
    return property_feed

View File

@ -0,0 +1,552 @@
#!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import urllib
import gdata
import atom.service
import gdata.service
import gdata.apps
import atom
# Provisioning API version used in request URLs.
API_VER="2.0"
HTTP_OK=200
# Error codes returned by the Provisioning API in the errorCode attribute
# of an error response body (see AppsForYourDomainException below).
# NOTE(review): 'UNKOWN' spelling is part of this module's public surface;
# do not rename without a deprecation path.
UNKOWN_ERROR=1000
USER_DELETED_RECENTLY=1100
USER_SUSPENDED=1101
DOMAIN_USER_LIMIT_EXCEEDED=1200
DOMAIN_ALIAS_LIMIT_EXCEEDED=1201
DOMAIN_SUSPENDED=1202
DOMAIN_FEATURE_UNAVAILABLE=1203
ENTITY_EXISTS=1300
ENTITY_DOES_NOT_EXIST=1301
ENTITY_NAME_IS_RESERVED=1302
ENTITY_NAME_NOT_VALID=1303
INVALID_GIVEN_NAME=1400
INVALID_FAMILY_NAME=1401
INVALID_PASSWORD=1402
INVALID_USERNAME=1403
INVALID_HASH_FUNCTION_NAME=1404
INVALID_HASH_DIGGEST_LENGTH=1405
INVALID_EMAIL_ADDRESS=1406
INVALID_QUERY_PARAMETER_VALUE=1407
TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500
# Default mailbox quota, in megabytes, used by CreateUser callers.
DEFAULT_QUOTA_LIMIT='2048'
class Error(Exception):
  """Base exception for all errors raised by this module."""
  pass
class AppsForYourDomainException(Error):
  """Raised when the Provisioning service returns an error response.

  Attempts to parse the structured error body; on success, exposes:
    element_tree: the parsed XML of the response body.
    error_code: the integer errorCode from the response (one of the
        module-level error constants).
    reason: the human-readable reason attribute.
    invalidInput: the input value the server rejected.
  If the body cannot be parsed as the expected error XML, only
  error_code is set, to UNKOWN_ERROR.
  """

  def __init__(self, response):
    Error.__init__(self, response)
    try:
      self.element_tree = ElementTree.fromstring(response['body'])
      self.error_code = int(self.element_tree[0].attrib['errorCode'])
      self.reason = self.element_tree[0].attrib['reason']
      self.invalidInput = self.element_tree[0].attrib['invalidInput']
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
    # not swallowed; any parse failure (bad XML, missing attributes,
    # non-integer code) still falls back to the generic error code.
    except Exception:
      self.error_code = UNKOWN_ERROR
class AppsService(gdata.service.GDataService):
"""Client for the Google Apps Provisioning service."""
def __init__(self, email=None, password=None, domain=None, source=None,
server='apps-apis.google.com', additional_headers=None,
**kwargs):
"""Creates a client for the Google Apps Provisioning service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
domain: string (optional) The Google Apps domain name.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'apps-apis.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='apps', source=source,
server=server, additional_headers=additional_headers, **kwargs)
self.ssl = True
self.port = 443
self.domain = domain
def _baseURL(self):
return "/a/feeds/%s" % self.domain
def AddAllElementsFromAllPages(self, link_finder, func):
"""retrieve all pages and add all elements"""
next = link_finder.GetNextLink()
while next is not None:
next_feed = self.Get(next.href, converter=func)
for a_entry in next_feed.entry:
link_finder.entry.append(a_entry)
next = next_feed.GetNextLink()
return link_finder
def RetrievePageOfEmailLists(self, start_email_list_name=None,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve one page of email list"""
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
if start_email_list_name is not None:
uri += "?startEmailListName=%s" % start_email_list_name
try:
return gdata.apps.EmailListFeedFromString(str(self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllEmailLists(
self, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all emaillists in this domain."""
first_page = self.RetrievePageOfEmailLists(num_retries=num_retries,
delay=delay,
backoff=backoff)
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.EmailListRecipientFeedFromString,
num_retries=num_retries, delay=delay, backoff=backoff)
def RetrieveAllEmailLists(self):
"""Retrieve all email list of a domain."""
ret = self.RetrievePageOfEmailLists()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RetrieveEmailList(self, list_name):
"""Retreive a single email list by the list's name."""
uri = "%s/emailList/%s/%s" % (
self._baseURL(), API_VER, list_name)
try:
return self.Get(uri, converter=gdata.apps.EmailListEntryFromString)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveEmailLists(self, recipient):
"""Retrieve All Email List Subscriptions for an Email Address."""
uri = "%s/emailList/%s?recipient=%s" % (
self._baseURL(), API_VER, recipient)
try:
ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RemoveRecipientFromEmailList(self, recipient, list_name):
"""Remove recipient from email list."""
uri = "%s/emailList/%s/%s/recipient/%s" % (
self._baseURL(), API_VER, list_name, recipient)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfRecipients(self, list_name, start_recipient=None,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve one page of recipient of an email list. """
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
if start_recipient is not None:
uri += "?startRecipient=%s" % start_recipient
try:
return gdata.apps.EmailListRecipientFeedFromString(str(
self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllRecipients(
self, list_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all recipients of a particular emaillist."""
first_page = self.RetrievePageOfRecipients(list_name,
num_retries=num_retries,
delay=delay,
backoff=backoff)
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.EmailListRecipientFeedFromString,
num_retries=num_retries, delay=delay, backoff=backoff)
def RetrieveAllRecipients(self, list_name):
"""Retrieve all recipient of an email list."""
ret = self.RetrievePageOfRecipients(list_name)
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListRecipientFeedFromString)
def AddRecipientToEmailList(self, recipient, list_name):
"""Add a recipient to a email list."""
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
recipient_entry = gdata.apps.EmailListRecipientEntry()
recipient_entry.who = gdata.apps.Who(email=recipient)
try:
return gdata.apps.EmailListRecipientEntryFromString(
str(self.Post(recipient_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteEmailList(self, list_name):
"""Delete a email list"""
uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateEmailList(self, list_name):
"""Create a email list. """
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
email_list_entry = gdata.apps.EmailListEntry()
email_list_entry.email_list = gdata.apps.EmailList(name=list_name)
try:
return gdata.apps.EmailListEntryFromString(
str(self.Post(email_list_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteNickname(self, nickname):
"""Delete a nickname"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfNicknames(self, start_nickname=None,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve one page of nicknames in the domain"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
if start_nickname is not None:
uri += "?startNickname=%s" % start_nickname
try:
return gdata.apps.NicknameFeedFromString(str(self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllNicknames(
self, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all nicknames in this domain."""
first_page = self.RetrievePageOfNicknames(num_retries=num_retries,
delay=delay,
backoff=backoff)
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries,
delay=delay, backoff=backoff)
def RetrieveAllNicknames(self):
"""Retrieve all nicknames in the domain"""
ret = self.RetrievePageOfNicknames()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def GetGeneratorForAllNicknamesOfAUser(
self, user_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all nicknames of a particular user."""
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
try:
first_page = gdata.apps.NicknameFeedFromString(str(self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries,
delay=delay, backoff=backoff)
def RetrieveNicknames(self, user_name):
"""Retrieve nicknames of the user"""
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
try:
ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def RetrieveNickname(self, nickname):
"""Retrieve a nickname.
Args:
nickname: string The nickname to retrieve
Returns:
gdata.apps.NicknameEntry
"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
return gdata.apps.NicknameEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateNickname(self, user_name, nickname):
"""Create a nickname"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
nickname_entry = gdata.apps.NicknameEntry()
nickname_entry.login = gdata.apps.Login(user_name=user_name)
nickname_entry.nickname = gdata.apps.Nickname(name=nickname)
try:
return gdata.apps.NicknameEntryFromString(
str(self.Post(nickname_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteUser(self, user_name):
"""Delete a user account"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def UpdateUser(self, user_name, user_entry):
"""Update a user account."""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateUser(self, user_name, family_name, given_name, password,
suspended='false', quota_limit=None,
password_hash_function=None,
change_password=None):
"""Create a user account. """
uri = "%s/user/%s" % (self._baseURL(), API_VER)
user_entry = gdata.apps.UserEntry()
user_entry.login = gdata.apps.Login(
user_name=user_name, password=password, suspended=suspended,
hash_function_name=password_hash_function,
change_password=change_password)
user_entry.name = gdata.apps.Name(family_name=family_name,
given_name=given_name)
if quota_limit is not None:
user_entry.quota = gdata.apps.Quota(limit=str(quota_limit))
try:
return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def SuspendUser(self, user_name):
user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'true':
user_entry.login.suspended = 'true'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
def RestoreUser(self, user_name):
  """Un-suspend the account; no-op if it is not suspended.

  Returns:
    The (possibly updated) gdata.apps.UserEntry for the account.
  """
  entry = self.RetrieveUser(user_name)
  if entry.login.suspended == 'false':
    # Already active; avoid a needless update round trip.
    return entry
  entry.login.suspended = 'false'
  return self.UpdateUser(user_name, entry)
def RetrieveUser(self, user_name):
"""Retrieve an user account.
Args:
user_name: string The user name to retrieve
Returns:
gdata.apps.UserEntry
"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfUsers(self, start_username=None,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve one page of users in this domain."""
uri = "%s/user/%s" % (self._baseURL(), API_VER)
if start_username is not None:
uri += "?startUsername=%s" % start_username
try:
return gdata.apps.UserFeedFromString(str(self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllUsers(self,
                            num_retries=gdata.service.DEFAULT_NUM_RETRIES,
                            delay=gdata.service.DEFAULT_DELAY,
                            backoff=gdata.service.DEFAULT_BACKOFF):
  """Retrieve a generator for all users in this domain.

  Paging is delegated to GetGeneratorFromLinkFinder, seeded with the
  first page of users; the retry policy is forwarded to both steps.
  """
  initial_page = self.RetrievePageOfUsers(
      num_retries=num_retries, delay=delay, backoff=backoff)
  return self.GetGeneratorFromLinkFinder(
      initial_page, gdata.apps.UserFeedFromString, num_retries=num_retries,
      delay=delay, backoff=backoff)
def RetrieveAllUsers(self):
  """Retrieve all users in this domain. OBSOLETE.

  Loads every page eagerly into one feed; prefer GetGeneratorForAllUsers.
  """
  first_page = self.RetrievePageOfUsers()
  # Fold every remaining page's entries into the first feed.
  return self.AddAllElementsFromAllPages(
      first_page, gdata.apps.UserFeedFromString)
class PropertyService(gdata.service.GDataService):
"""Client for the Google Apps Property service."""
def __init__(self, email=None, password=None, domain=None, source=None,
server='apps-apis.google.com', additional_headers=None):
gdata.service.GDataService.__init__(self, email=email, password=password,
service='apps', source=source,
server=server,
additional_headers=additional_headers)
self.ssl = True
self.port = 443
self.domain = domain
def AddAllElementsFromAllPages(self, link_finder, func):
"""retrieve all pages and add all elements"""
next = link_finder.GetNextLink()
while next is not None:
next_feed = self.Get(next.href, converter=func)
for a_entry in next_feed.entry:
link_finder.entry.append(a_entry)
next = next_feed.GetNextLink()
return link_finder
def _GetPropertyEntry(self, properties):
property_entry = gdata.apps.PropertyEntry()
property = []
for name, value in properties.iteritems():
if name is not None and value is not None:
property.append(gdata.apps.Property(name=name, value=value))
property_entry.property = property
return property_entry
def _PropertyEntry2Dict(self, property_entry):
properties = {}
for i, property in enumerate(property_entry.property):
properties[property.name] = property.value
return properties
def _GetPropertyFeed(self, uri):
try:
return gdata.apps.PropertyFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _GetPropertiesList(self, uri):
property_feed = self._GetPropertyFeed(uri)
# pagination
property_feed = self.AddAllElementsFromAllPages(
property_feed, gdata.apps.PropertyFeedFromString)
properties_list = []
for property_entry in property_feed.entry:
properties_list.append(self._PropertyEntry2Dict(property_entry))
return properties_list
def _GetProperties(self, uri):
try:
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
str(self.Get(uri))))
except gdata.service.RequestError, e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _PostProperties(self, uri, properties):
property_entry = self._GetPropertyEntry(properties)
try:
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
str(self.Post(property_entry, uri))))
except gdata.service.RequestError, e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _PutProperties(self, uri, properties):
property_entry = self._GetPropertyEntry(properties)
try:
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
str(self.Put(property_entry, uri))))
except gdata.service.RequestError, e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _DeleteProperties(self, uri):
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _bool2str(b):
if b is None:
return None
return str(b is True).lower()

View File

@ -0,0 +1,39 @@
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides a base class to represent property elements in feeds.
This module is used for version 2 of the Google Data APIs. The primary class
in this module is AppsProperty.
"""
__author__ = 'Vic Fryzel <vicfryzel@google.com>'
import atom.core
import gdata.apps
class AppsProperty(atom.core.XmlElement):
  """Represents an <apps:property> element in a feed."""
  _qname = gdata.apps.APPS_TEMPLATE % 'property'
  # atom.core attribute declarations: the string names the XML attribute
  # that the Python attribute maps to in the serialized element.
  name = 'name'
  value = 'value'

952
python/gdata/auth.py Normal file
View File

@ -0,0 +1,952 @@
#!/usr/bin/python
#
# Copyright (C) 2007 - 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import math
import random
import re
import time
import types
import urllib
import atom.http_interface
import atom.token_store
import atom.url
import gdata.oauth as oauth
import gdata.oauth.rsa as oauth_rsa
import gdata.tlslite.utils.keyfactory as keyfactory
import gdata.tlslite.utils.cryptomath as cryptomath
import gdata.gauth
__author__ = 'api.jscudder (Jeff Scudder)'
PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth='
AUTHSUB_AUTH_LABEL = 'AuthSub token='
"""This module provides functions and objects used with Google authentication.
Details on Google authorization mechanisms used with the Google Data APIs can
be found here:
http://code.google.com/apis/gdata/auth.html
http://code.google.com/apis/accounts/
The essential functions are the following.
Related to ClientLogin:
generate_client_login_request_body: Constructs the body of an HTTP request to
obtain a ClientLogin token for a specific
service.
extract_client_login_token: Creates a ClientLoginToken with the token from a
success response to a ClientLogin request.
get_captcha_challenge: If the server responded to the ClientLogin request
with a CAPTCHA challenge, this method extracts the
CAPTCHA URL and identifying CAPTCHA token.
Related to AuthSub:
generate_auth_sub_url: Constructs a full URL for a AuthSub request. The
user's browser must be sent to this Google Accounts
URL and redirected back to the app to obtain the
AuthSub token.
extract_auth_sub_token_from_url: Once the user's browser has been
redirected back to the web app, use this
function to create an AuthSubToken with
the correct authorization token and scope.
token_from_http_body: Extracts the AuthSubToken value string from the
server's response to an AuthSub session token upgrade
request.
"""
def generate_client_login_request_body(email, password, service, source,
    account_type='HOSTED_OR_GOOGLE', captcha_token=None,
    captcha_response=None):
  """Creates the body of the authentication request.

  See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request
  for more details.

  Args:
    email: str
    password: str
    service: str
    source: str
    account_type: str (optional) Default is 'HOSTED_OR_GOOGLE'; other valid
        values are 'GOOGLE' and 'HOSTED'.
    captcha_token: str (optional)
    captcha_response: str (optional)

  Returns:
    The HTTP body to send in a request for a client login token.
  """
  # Thin backwards-compatibility delegate; the implementation lives in
  # gdata.gauth.
  return gdata.gauth.generate_client_login_request_body(
      email, password, service, source, account_type=account_type,
      captcha_token=captcha_token, captcha_response=captcha_response)


GenerateClientLoginRequestBody = generate_client_login_request_body
def GenerateClientLoginAuthToken(http_body):
  """Returns the token value to use in Authorization headers.

  Reads the token from the server's response to a Client Login request and
  creates the header value to use in requests.

  Args:
    http_body: str The body of the server's HTTP response to a Client Login
        request.

  Returns:
    The value half of an Authorization header, or None when no token is
    found in the response body.
  """
  token = get_client_login_token(http_body)
  if token:
    # Use the module constant so the prefix cannot drift from
    # ClientLoginToken.set_token_string, which uses the same label.
    return '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token)
  return None
def get_client_login_token(http_body):
  """Returns the raw token value for a ClientLoginToken.

  Reads the token from the server's response to a Client Login request and
  creates the token value string to use in requests.

  Args:
    http_body: str The body of the server's HTTP response to a Client Login
        request.

  Returns:
    The token value string for a ClientLoginToken.
  """
  # Parsing is delegated to the newer gdata.gauth implementation.
  return gdata.gauth.get_client_login_token_string(http_body)
def extract_client_login_token(http_body, scopes):
  """Parses the server's response and returns a ClientLoginToken.

  Args:
    http_body: str The body of the server's HTTP response to a Client Login
        request. It is assumed that the login request was successful.
    scopes: list containing atom.url.Urls or strs. The scopes list contains
        all of the partial URLs under which the client login token is
        valid. For example, if scopes contains ['http://example.com/foo']
        then the client login token would be valid for
        http://example.com/foo/bar/baz

  Returns:
    A ClientLoginToken which is valid for the specified scopes.
  """
  token = ClientLoginToken(scopes=scopes)
  token.set_token_string(get_client_login_token(http_body))
  return token
def get_captcha_challenge(http_body,
    captcha_base_url='http://www.google.com/accounts/'):
  """Returns the URL and token for a CAPTCHA challenge issued by the server.

  Args:
    http_body: str The body of the HTTP response from the server which
        contains the CAPTCHA challenge.
    captcha_base_url: str Prefix for the returned image URL. The server only
        provides the tail of the URL (e.g. 'Captcha?ctoken=Hi...N'), so this
        base is prepended to form the full viewable URL.

  Returns:
    A dictionary describing the challenge, of the form:
        {'token': string identifying the CAPTCHA image,
         'url': string containing the URL of the image}
    or None if there was no CAPTCHA challenge in the response.
  """
  # Backwards-compatibility delegate to the gdata.gauth implementation.
  return gdata.gauth.get_captcha_challenge(http_body, captcha_base_url)


GetCaptchaChallenge = get_captcha_challenge
def GenerateOAuthRequestTokenUrl(
    oauth_input_params, scopes,
    request_token_url='https://www.google.com/accounts/OAuthGetRequestToken',
    extra_parameters=None):
  """Generate a URL at which a request for OAuth request token is to be sent.

  Args:
    oauth_input_params: OAuthInputParams OAuth input parameters.
    scopes: list of strings The URLs of the services to be accessed.
    request_token_url: string The beginning of the request token URL. This is
        normally 'https://www.google.com/accounts/OAuthGetRequestToken' or
        '/accounts/OAuthGetRequestToken'
    extra_parameters: dict (optional) key-value pairs as any additional
        parameters to be included in the URL and signature while making a
        request for fetching an OAuth request token. All the OAuth parameters
        are added by default. But if provided through this argument, any
        default parameters will be overwritten. For e.g. a default parameter
        oauth_version 1.0 can be overwritten if
        extra_parameters = {'oauth_version': '2.0'}

  Returns:
    atom.url.Url OAuth request token URL.
  """
  # Multiple scopes are conveyed as one space-separated 'scope' parameter.
  scopes_string = ' '.join([str(scope) for scope in scopes])
  parameters = {'scope': scopes_string}
  # extra_parameters is applied last so callers may override defaults
  # (including 'scope' itself).
  if extra_parameters:
    parameters.update(extra_parameters)
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(
      oauth_input_params.GetConsumer(), http_url=request_token_url,
      parameters=parameters)
  # Sign only after all parameters are final: the signature covers them.
  # token is None because no token exists yet at the request-token stage.
  oauth_request.sign_request(oauth_input_params.GetSignatureMethod(),
                             oauth_input_params.GetConsumer(), None)
  return atom.url.parse_url(oauth_request.to_url())
def GenerateOAuthAuthorizationUrl(
    request_token,
    authorization_url='https://www.google.com/accounts/OAuthAuthorizeToken',
    callback_url=None, extra_params=None,
    include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope'):
  """Generates URL at which user will login to authorize the request token.

  Args:
    request_token: gdata.auth.OAuthToken OAuth request token.
    authorization_url: string The beginning of the authorization URL. This is
        normally 'https://www.google.com/accounts/OAuthAuthorizeToken' or
        '/accounts/OAuthAuthorizeToken'
    callback_url: string (optional) The URL user will be sent to after
        logging in and granting access.
    extra_params: dict (optional) Additional parameters to be sent.
    include_scopes_in_callback: Boolean (default=False) When True and
        callback_url is present, the token's scope(s) are appended to the
        callback URL under scopes_param_prefix, so the page receiving the
        OAuth token can tell which URLs the token grants access to.
    scopes_param_prefix: string (default='oauth_token_scope') The URL
        parameter key which maps to the list of valid scopes for the token.

  Returns:
    atom.url.Url OAuth authorization URL.
  """
  scopes = request_token.scopes
  if isinstance(scopes, list):
    scopes = ' '.join(scopes)
  if include_scopes_in_callback and callback_url:
    # Append the scopes with '?' or '&' depending on whether the callback
    # URL already carries query parameters.
    separator = '&' if callback_url.find('?') > -1 else '?'
    callback_url = '%s%s%s' % (
        callback_url, separator,
        urllib.urlencode({scopes_param_prefix: scopes}))
  oauth_token = oauth.OAuthToken(request_token.key, request_token.secret)
  oauth_request = oauth.OAuthRequest.from_token_and_callback(
      token=oauth_token, callback=callback_url,
      http_url=authorization_url, parameters=extra_params)
  return atom.url.parse_url(oauth_request.to_url())
def GenerateOAuthAccessTokenUrl(
    authorized_request_token,
    oauth_input_params,
    access_token_url='https://www.google.com/accounts/OAuthGetAccessToken',
    oauth_version='1.0',
    oauth_verifier=None):
  """Generates the URL used to exchange an authorized request token.

  Args:
    authorized_request_token: gdata.auth.OAuthToken OAuth authorized request
        token.
    oauth_input_params: OAuthInputParams OAuth input parameters.
    access_token_url: string The beginning of the access token URL. This is
        normally 'https://www.google.com/accounts/OAuthGetAccessToken' or
        '/accounts/OAuthGetAccessToken'
    oauth_version: str (default='1.0') oauth_version parameter.
    oauth_verifier: str (optional) When present, the client is assumed to
        follow OAuth 1.0a, which echoes the SP-provided verifier back in the
        access-token step.

  Returns:
    atom.url.Url OAuth access token URL.
  """
  parameters = {'oauth_version': oauth_version}
  if oauth_verifier is not None:
    parameters['oauth_verifier'] = oauth_verifier
  access_token = oauth.OAuthToken(authorized_request_token.key,
                                  authorized_request_token.secret)
  signable_request = oauth.OAuthRequest.from_consumer_and_token(
      oauth_input_params.GetConsumer(), token=access_token,
      http_url=access_token_url, parameters=parameters)
  # Sign last; the signature must cover the final parameter set.
  signable_request.sign_request(oauth_input_params.GetSignatureMethod(),
                                oauth_input_params.GetConsumer(), access_token)
  return atom.url.parse_url(signable_request.to_url())
def GenerateAuthSubUrl(next, scope, secure=False, session=True,
    request_url='https://www.google.com/accounts/AuthSubRequest',
    domain='default'):
  """Generate a URL at which the user will login and be redirected back.

  Users enter their credentials on a Google login page and a token is sent
  to the URL specified in next. See documentation for AuthSub login at:
  http://code.google.com/apis/accounts/AuthForWebApps.html

  Args:
    next: string The URL user will be sent to after logging in.
    scope: string The URL of the service to be accessed.
    secure: boolean (optional) Whether the issued token is a secure token.
    session: boolean (optional) Whether the issued token can be upgraded to
        a session token.
    request_url: str The beginning of the request URL, normally
        'http://www.google.com/accounts/AuthSubRequest' or
        '/accounts/AuthSubRequest'.
    domain: str (optional) The Google Apps domain for this account, or
        'default' (the default) for a plain Google Account.
  """
  # The AuthSub service accepts numeric flags rather than True/False.
  secure_flag = 1 if secure else 0
  session_flag = 1 if session else 0
  request_params = urllib.urlencode({'next': next, 'scope': scope,
                                     'secure': secure_flag,
                                     'session': session_flag,
                                     'hd': domain})
  # Join with '?' unless the request URL already has query parameters,
  # in which case append with the '&' separator.
  joiner = '&' if request_url.find('?') > -1 else '?'
  return '%s%s%s' % (request_url, joiner, request_params)
def generate_auth_sub_url(next, scopes, secure=False, session=True,
    request_url='https://www.google.com/accounts/AuthSubRequest',
    domain='default', scopes_param_prefix='auth_sub_scopes'):
  """Constructs a URL string for requesting a multiscope AuthSub token.

  When Google Accounts redirects the browser to the 'next' URL it appends
  only the single-use token value ('token' parameter) — not the scopes that
  were requested. This function therefore stores the requested scopes on
  the 'next' URL itself (under scopes_param_prefix) so the landing page can
  recover both the token value and the scope list from its own URL.

  Args:
    next: atom.url.Url or string The URL user will be sent to after
        authorizing this web application to access their data.
    scopes: list containing strings The URLs of the services to be accessed.
    secure: boolean (optional) Whether the issued token is a secure token.
    session: boolean (optional) Whether the issued token can be upgraded to
        a session token.
    request_url: atom.url.Url or str The beginning of the request URL,
        normally 'http://www.google.com/accounts/AuthSubRequest' or
        '/accounts/AuthSubRequest'.
    domain: The domain which the account is part of. Used for Google Apps
        accounts; the default 'default' means a plain Google Account
        (@gmail.com for example).
    scopes_param_prefix: str (optional) Key of the URL parameter added to
        the next URL that carries the requested scopes. Defaults to
        'auth_sub_scopes'.

  Returns:
    An atom.url.Url which the user's browser should be directed to in order
    to authorize this application to access their information.
  """
  if isinstance(next, (str, unicode)):
    next = atom.url.parse_url(next)
  if isinstance(request_url, (str, unicode)):
    request_url = atom.url.parse_url(request_url)
  scopes_string = ' '.join([str(scope) for scope in scopes])
  # Stash the scopes on the 'next' URL so the redirect target can see them.
  next.params[scopes_param_prefix] = scopes_string
  request_url.params['next'] = str(next)
  request_url.params['scope'] = scopes_string
  # AuthSub expects numeric flags.
  request_url.params['session'] = 1 if session else 0
  request_url.params['secure'] = 1 if secure else 0
  request_url.params['hd'] = domain
  return request_url
def AuthSubTokenFromUrl(url):
  """Extracts the AuthSub token from the URL.

  Used after the AuthSub redirect has sent the user to the 'next' page and
  appended the token to the URL. This function returns the value to be used
  in the Authorization header.

  Args:
    url: str The URL of the current page which contains the AuthSub token as
        a URL parameter.

  Returns:
    The Authorization header value, or None when the URL carries no token.
  """
  token = TokenFromUrl(url)
  if token:
    # Use the shared module constant so the header prefix stays in sync
    # with AuthSubToken.set_token_string and AuthSubTokenFromHttpBody.
    return '%s%s' % (AUTHSUB_AUTH_LABEL, token)
  return None
def TokenFromUrl(url):
  """Extracts the raw AuthSub token value from a URL.

  Args:
    url: str The URL, or just its query portion (after the ?), containing
        the AuthSub token as a 'token' URL parameter.

  Returns:
    The raw token value string, or None when no 'token' parameter exists.
  """
  if url.find('?') > -1:
    query = url.split('?')[1]
  else:
    query = url
  marker = 'token='
  for param in query.split('&'):
    if param.startswith(marker):
      return param[len(marker):]
  return None
def extract_auth_sub_token_from_url(url,
    scopes_param_prefix='auth_sub_scopes', rsa_key=None):
  """Creates an AuthSubToken with token value and scopes taken from the URL.

  After the Google Accounts AuthSub pages redirect the user's browser back
  to the web application (the 'next' URL from the request), the token is in
  the current URL as the 'token' parameter; if generate_auth_sub_url built
  the request, the valid scopes ride along under scopes_param_prefix.

  Args:
    url: atom.url.Url or str representing the current URL, with the token
        value and valid scopes as URL parameters.
    scopes_param_prefix: str (optional) The URL parameter key which maps to
        the list of valid scopes for the token.
    rsa_key: (optional) When supplied, a SecureAuthSubToken is built with
        this key instead of a plain AuthSubToken.

  Returns:
    An AuthSubToken (or SecureAuthSubToken) valid for the scopes found on
    the URL — no scopes if none were present — or None when the URL has no
    'token' parameter.
  """
  if isinstance(url, (str, unicode)):
    url = atom.url.parse_url(url)
  if 'token' not in url.params:
    return None
  scopes = []
  if scopes_param_prefix in url.params:
    scopes = url.params[scopes_param_prefix].split(' ')
  if rsa_key:
    token = SecureAuthSubToken(rsa_key, scopes=scopes)
  else:
    token = AuthSubToken(scopes=scopes)
  token.set_token_string(url.params['token'])
  return token
def AuthSubTokenFromHttpBody(http_body):
  """Extracts the AuthSub token from an HTTP body string.

  Used to find the new session token after making a request to upgrade a
  single-use AuthSub token.

  Args:
    http_body: str The response from the server which contains the AuthSub
        key, e.g. the body of an upgrade-token response.

  Returns:
    The Authorization header value containing the AuthSub token, or None
    when no token line is present.
  """
  value = token_from_http_body(http_body)
  if not value:
    return None
  return AUTHSUB_AUTH_LABEL + value
def token_from_http_body(http_body):
  """Extracts the raw AuthSub token value from an HTTP body string.

  Used to find the new session token after making a request to upgrade a
  single-use AuthSub token.

  Args:
    http_body: str The response from the server which contains the AuthSub
        key, e.g. the body of an upgrade-token response.

  Returns:
    The raw token value string for an AuthSubToken object, or None when no
    'Token=' line is present.
  """
  prefix = 'Token='
  for response_line in http_body.splitlines():
    if response_line.startswith(prefix):
      # Strip the prefix and return the remainder of the line.
      return response_line[len(prefix):]
  return None


TokenFromHttpBody = token_from_http_body
def OAuthTokenFromUrl(url, scopes_param_prefix='oauth_token_scope'):
  """Creates an OAuthToken with key and scopes (if present) from a URL.

  After the Google Accounts OAuth pages redirect the user's browser back to
  the web application (the 'callback' URL from the request), the token —
  now either authorized or denied by the user — is in the current URL under
  'oauth_token'. If GenerateOAuthAuthorizationUrl was called with
  include_scopes_in_callback=True, the valid scopes ride along under
  scopes_param_prefix.

  Args:
    url: atom.url.Url or str representing the current URL, with the token
        key and valid scopes as URL parameters.
    scopes_param_prefix: str (optional) The URL parameter key which maps to
        the list of valid scopes for the token.

  Returns:
    An OAuthToken with the key from the URL, valid for the scopes found on
    the URL (none if absent), or None when the URL has no 'oauth_token'
    parameter.
  """
  if isinstance(url, (str, unicode)):
    url = atom.url.parse_url(url)
  if 'oauth_token' not in url.params:
    return None
  scopes = []
  if scopes_param_prefix in url.params:
    scopes = url.params[scopes_param_prefix].split(' ')
  return OAuthToken(key=url.params['oauth_token'], scopes=scopes)
def OAuthTokenFromHttpBody(http_body):
  """Parses the HTTP response body and returns an OAuth token.

  The returned OAuthToken has only its key and secret set; it knows nothing
  about scopes or oauth_input_params — filling those in is the caller's
  responsibility.

  Returns:
    OAuthToken OAuth token.
  """
  parsed = oauth.OAuthToken.from_string(http_body)
  return OAuthToken(key=parsed.key, secret=parsed.secret)
class OAuthSignatureMethod(object):
  """Holds valid OAuth signature methods.

  RSA_SHA1: Class to build signature according to RSA-SHA1 algorithm.
  HMAC_SHA1: Class to build signature according to HMAC-SHA1 algorithm.
  """
  # HMAC needs no extra key material beyond the consumer secret, so the
  # upstream oauth class is used directly.
  HMAC_SHA1 = oauth.OAuthSignatureMethod_HMAC_SHA1

  class RSA_SHA1(oauth_rsa.OAuthSignatureMethod_RSA_SHA1):
    """Provides implementation for abstract methods to return RSA certs."""

    def __init__(self, private_key, public_cert):
      self.private_key = private_key
      self.public_cert = public_cert

    def _fetch_public_cert(self, unused_oauth_request):
      # Certs are fixed at construction time; the request is ignored.
      return self.public_cert

    def _fetch_private_cert(self, unused_oauth_request):
      return self.private_key
class OAuthInputParams(object):
  """Stores OAuth input parameters.

  This class is a store for OAuth input parameters viz. consumer key and
  secret, signature method and RSA key.
  """

  def __init__(self, signature_method, consumer_key, consumer_secret=None,
               rsa_key=None, requestor_id=None):
    """Initializes object with parameters required for using OAuth mechanism.

    NOTE: Though consumer_secret and rsa_key are optional, either of the two
    is required depending on the value of the signature_method.

    Args:
      signature_method: class which provides implementation for strategy class
          oauth.oauth.OAuthSignatureMethod. Signature method to be used for
          signing each request. Valid implementations are provided as the
          constants defined by gdata.auth.OAuthSignatureMethod. Currently
          they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
          gdata.auth.OAuthSignatureMethod.HMAC_SHA1. Instead of passing in
          the strategy class, you may pass in a string for 'RSA_SHA1' or
          'HMAC_SHA1'. If you plan to use OAuth on App Engine (or another
          WSGI environment) I recommend specifying signature method using a
          string (the only options are 'RSA_SHA1' and 'HMAC_SHA1'). In these
          environments there are sometimes issues with pickling an object in
          which a member references a class or function. Storing a string to
          refer to the signature method mitigates complications when
          pickling.
      consumer_key: string Domain identifying third_party web application.
      consumer_secret: string (optional) Secret generated during registration.
          Required only for HMAC_SHA1 signature method.
      rsa_key: string (optional) Private key required for RSA_SHA1 signature
          method.
      requestor_id: string (optional) User email address to make requests on
          their behalf. This parameter should only be set when performing
          2 legged OAuth requests.
    """
    # Normalize known strategies to strings so instances stay picklable
    # (see the docstring note above about classes in pickled attributes).
    if (signature_method == OAuthSignatureMethod.RSA_SHA1
        or signature_method == 'RSA_SHA1'):
      self.__signature_strategy = 'RSA_SHA1'
    elif (signature_method == OAuthSignatureMethod.HMAC_SHA1
          or signature_method == 'HMAC_SHA1'):
      self.__signature_strategy = 'HMAC_SHA1'
    else:
      # Unknown strategy: stored as-is and called with no args on access.
      self.__signature_strategy = signature_method
    self.rsa_key = rsa_key
    self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
    self.requestor_id = requestor_id

  def __get_signature_method(self):
    # Rebuild the concrete strategy object on demand from the stored string.
    if self.__signature_strategy == 'RSA_SHA1':
      return OAuthSignatureMethod.RSA_SHA1(self.rsa_key, None)
    elif self.__signature_strategy == 'HMAC_SHA1':
      return OAuthSignatureMethod.HMAC_SHA1()
    else:
      return self.__signature_strategy()

  def __set_signature_method(self, signature_method):
    # Mirrors the normalization performed in __init__.
    if (signature_method == OAuthSignatureMethod.RSA_SHA1
        or signature_method == 'RSA_SHA1'):
      self.__signature_strategy = 'RSA_SHA1'
    elif (signature_method == OAuthSignatureMethod.HMAC_SHA1
          or signature_method == 'HMAC_SHA1'):
      self.__signature_strategy = 'HMAC_SHA1'
    else:
      self.__signature_strategy = signature_method

  _signature_method = property(__get_signature_method, __set_signature_method,
      doc="""Returns object capable of signing the request using RSA of HMAC.

      Replaces the _signature_method member to avoid pickle errors.""")

  def GetSignatureMethod(self):
    """Gets the OAuth signature method.

    Returns:
      object of supertype <oauth.oauth.OAuthSignatureMethod>
    """
    return self._signature_method

  def GetConsumer(self):
    """Gets the OAuth consumer.

    Returns:
      object of type <oauth.oauth.Consumer>
    """
    return self._consumer
class ClientLoginToken(atom.http_interface.GenericToken):
  """Stores the Authorization header in auth_header and adds it to requests.

  This token will add its Authorization header to an HTTP request
  as it is made. This token class is simple but
  some Token classes must calculate portions of the Authorization header
  based on the request being made, which is why the token is responsible
  for making requests via an http_client parameter.

  Args:
    auth_header: str The value for the Authorization header.
    scopes: list of str or atom.url.Url specifying the beginnings of URLs
        for which this token can be used. For example, if scopes contains
        'http://example.com/foo', then this token can be used for a request
        to 'http://example.com/foo/bar' but it cannot be used for a request
        to 'http://example.com/baz'
  """
  def __init__(self, auth_header=None, scopes=None):
    self.auth_header = auth_header
    # Fresh list per instance; never a shared mutable default.
    self.scopes = scopes or []

  def __str__(self):
    return self.auth_header

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """Sets the Authorization header and makes the HTTP request."""
    if headers is None:
      headers = {'Authorization':self.auth_header}
    else:
      headers['Authorization'] = self.auth_header
    return http_client.request(operation, url, data=data, headers=headers)

  def get_token_string(self):
    """Removes PROGRAMMATIC_AUTH_LABEL to give just the token value."""
    return self.auth_header[len(PROGRAMMATIC_AUTH_LABEL):]

  def set_token_string(self, token_string):
    """Stores token_string in auth_header behind PROGRAMMATIC_AUTH_LABEL."""
    self.auth_header = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token_string)

  def valid_for_scope(self, url):
    """Tells the caller if the token authorizes access to the desired URL.
    """
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    for scope in self.scopes:
      if scope == atom.token_store.SCOPE_ALL:
        # Wildcard scope: the token is valid for any URL.
        return True
      if isinstance(scope, (str, unicode)):
        scope = atom.url.parse_url(scope)
      if scope == url:
        return True
      # Check the host and the path, but ignore the port and protocol.
      elif scope.host == url.host and not scope.path:
        return True
      elif scope.host == url.host and scope.path and not url.path:
        # Scope requires a path but the URL has none; try the next scope.
        continue
      elif scope.host == url.host and url.path.startswith(scope.path):
        return True
    return False
class AuthSubToken(ClientLoginToken):
  """A ClientLoginToken whose Authorization header carries an AuthSub label."""

  def get_token_string(self):
    """Removes AUTHSUB_AUTH_LABEL to give just the token value."""
    label_length = len(AUTHSUB_AUTH_LABEL)
    return self.auth_header[label_length:]

  def set_token_string(self, token_string):
    """Prefixes the raw token with AUTHSUB_AUTH_LABEL to form the header."""
    self.auth_header = '%s%s' % (AUTHSUB_AUTH_LABEL, token_string)
class OAuthToken(atom.http_interface.GenericToken):
  """Stores the token key, token secret and scopes for which token is valid.

  This token adds the authorization header to each request made. It
  re-calculates authorization header for every request since the OAuth
  signature to be added to the authorization header is dependent on the
  request parameters.

  Attributes:
    key: str The value for the OAuth token i.e. token key.
    secret: str The value for the OAuth token secret.
    scopes: list of str or atom.url.Url specifying the beginnings of URLs
        for which this token can be used. For example, if scopes contains
        'http://example.com/foo', then this token can be used for a request to
        'http://example.com/foo/bar' but it cannot be used for a request to
        'http://example.com/baz'
    oauth_input_params: OAuthInputParams OAuth input parameters.
  """

  def __init__(self, key=None, secret=None, scopes=None,
               oauth_input_params=None):
    self.key = key
    self.secret = secret
    self.scopes = scopes or []
    self.oauth_input_params = oauth_input_params

  def __str__(self):
    return self.get_token_string()

  def get_token_string(self):
    """Returns the token string.

    The token string returned is of format
    oauth_token=[0]&oauth_token_secret=[1], where [0] and [1] are some strings.

    Returns:
      A token string of format oauth_token=[0]&oauth_token_secret=[1],
      where [0] and [1] are some strings. If self.secret is absent, it just
      returns oauth_token=[0]. If self.key is absent, it just returns
      oauth_token_secret=[1]. If both are absent, it returns None.
    """
    if self.key and self.secret:
      # urlencode percent-escapes any reserved characters in key/secret.
      return urllib.urlencode({'oauth_token': self.key,
                               'oauth_token_secret': self.secret})
    elif self.key:
      return 'oauth_token=%s' % self.key
    elif self.secret:
      return 'oauth_token_secret=%s' % self.secret
    else:
      return None

  def set_token_string(self, token_string):
    """Sets the token key and secret from the token string.

    Args:
      token_string: str Token string of form
          oauth_token=[0]&oauth_token_secret=[1]. If oauth_token is not
          present, self.key will be None. If oauth_token_secret is not
          present, self.secret will be None.
    """
    # NOTE: keys absent from token_string leave the current attribute
    # values untouched rather than resetting them to None.
    token_params = cgi.parse_qs(token_string, keep_blank_values=False)
    if 'oauth_token' in token_params:
      self.key = token_params['oauth_token'][0]
    if 'oauth_token_secret' in token_params:
      self.secret = token_params['oauth_token_secret'][0]

  def GetAuthHeader(self, http_method, http_url, realm=''):
    """Get the authentication header.

    Args:
      http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc.
      http_url: string or atom.url.Url HTTP URL to which request is made.
      realm: string (default='') realm parameter to be included in the
          authorization header.

    Returns:
      dict Header to be sent with every subsequent request after
      authentication.
    """
    if isinstance(http_url, types.StringTypes):
      http_url = atom.url.parse_url(http_url)
    header = None
    token = None
    # Only build a token object when at least one of key/secret is present;
    # otherwise the request is signed with the consumer credentials alone.
    if self.key or self.secret:
      token = oauth.OAuthToken(self.key, self.secret)
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        self.oauth_input_params.GetConsumer(), token=token,
        http_url=str(http_url), http_method=http_method,
        parameters=http_url.params)
    oauth_request.sign_request(self.oauth_input_params.GetSignatureMethod(),
                               self.oauth_input_params.GetConsumer(), token)
    header = oauth_request.to_header(realm=realm)
    # '+' in the header would otherwise be decoded as a space server-side,
    # so escape it explicitly.
    header['Authorization'] = header['Authorization'].replace('+', '%2B')
    return header

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """Sets the Authorization header and makes the HTTP request."""
    if not headers:
      headers = {}
    # NOTE(review): presumably the requestor id implements 2-legged OAuth via
    # the xoauth_requestor_id URL parameter -- confirm against OAuthInputParams.
    if self.oauth_input_params.requestor_id:
      url.params['xoauth_requestor_id'] = self.oauth_input_params.requestor_id
    headers.update(self.GetAuthHeader(operation, url))
    return http_client.request(operation, url, data=data, headers=headers)

  def valid_for_scope(self, url):
    # Same matching rules as ClientLoginToken.valid_for_scope; kept in sync.
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    for scope in self.scopes:
      # SCOPE_ALL acts as a wildcard matching every URL.
      if scope == atom.token_store.SCOPE_ALL:
        return True
      if isinstance(scope, (str, unicode)):
        scope = atom.url.parse_url(scope)
      if scope == url:
        return True
      # Check the host and the path, but ignore the port and protocol.
      elif scope.host == url.host and not scope.path:
        return True
      elif scope.host == url.host and scope.path and not url.path:
        continue
      elif scope.host == url.host and url.path.startswith(scope.path):
        return True
    return False
class SecureAuthSubToken(AuthSubToken):
  """Stores the rsa private key, token, and scopes for the secure AuthSub token.

  This token adds the authorization header to each request made. It
  re-calculates authorization header for every request since the secure AuthSub
  signature to be added to the authorization header is dependent on the
  request parameters.

  Attributes:
    rsa_key: string The RSA private key in PEM format that the token will
        use to sign requests
    token_string: string (optional) The value for the AuthSub token.
    scopes: list of str or atom.url.Url specifying the beginnings of URLs
        for which this token can be used. For example, if scopes contains
        'http://example.com/foo', then this token can be used for a request to
        'http://example.com/foo/bar' but it cannot be used for a request to
        'http://example.com/baz'
  """

  def __init__(self, rsa_key, token_string=None, scopes=None):
    # Parse the PEM text once here; GetAuthHeader uses the parsed key to sign.
    self.rsa_key = keyfactory.parsePEMKey(rsa_key)
    self.token_string = token_string or ''
    self.scopes = scopes or []

  def __str__(self):
    return self.get_token_string()

  def get_token_string(self):
    return str(self.token_string)

  def set_token_string(self, token_string):
    self.token_string = token_string

  def GetAuthHeader(self, http_method, http_url):
    """Generates the Authorization header.

    The form of the secure AuthSub Authorization header is
    Authorization: AuthSub token="token" sigalg="sigalg" data="data" sig="sig"
    and data represents a string in the form
    data = http_method http_url timestamp nonce

    Args:
      http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc.
      http_url: string or atom.url.Url HTTP URL to which request is made.

    Returns:
      dict Header to be sent with every subsequent request after
      authentication.
    """
    timestamp = int(math.floor(time.time()))
    # Random 64-bit nonce rendered as an unsigned decimal string.
    nonce = '%lu' % random.randrange(1, 2**64)
    data = '%s %s %d %s' % (http_method, str(http_url), timestamp, nonce)
    # Sign the data blob with the RSA key and base64-encode the signature.
    sig = cryptomath.bytesToBase64(self.rsa_key.hashAndSign(data))
    header = {'Authorization': '%s"%s" data="%s" sig="%s" sigalg="rsa-sha1"' %
              (AUTHSUB_AUTH_LABEL, self.token_string, data, sig)}
    return header

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """Sets the Authorization header and makes the HTTP request."""
    if not headers:
      headers = {}
    headers.update(self.GetAuthHeader(operation, url))
    return http_client.request(operation, url, data=data, headers=headers)

View File

@ -0,0 +1,697 @@
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Base."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
# XML namespaces which are often used in Google Base entities.
GBASE_NAMESPACE = 'http://base.google.com/ns/1.0'  # custom item attributes (g:)
GBASE_TEMPLATE = '{http://base.google.com/ns/1.0}%s'  # ElementTree tag template
GMETA_NAMESPACE = 'http://base.google.com/ns-metadata/1.0'  # metadata feeds
GMETA_TEMPLATE = '{http://base.google.com/ns-metadata/1.0}%s'  # ElementTree tag template
class ItemAttributeContainer(atom.AtomBase):
  """Provides methods for finding Google Base Item attributes.

  Google Base item attributes are child nodes in the gbase namespace. Google
  Base allows you to define your own item attributes and this class provides
  methods to interact with the custom attributes.
  """

  def GetItemAttributes(self, name):
    """Returns a list of all item attributes which have the desired name.

    Args:
      name: str The tag of the desired base attributes. For example, calling
          this method with 'rating' would return a list of ItemAttributes
          represented by a 'g:rating' tag.

    Returns:
      A list of matching ItemAttribute objects.
    """
    result = []
    for attrib in self.item_attributes:
      if attrib.name == name:
        result.append(attrib)
    return result

  def FindItemAttribute(self, name):
    """Get the contents of the first Base item attribute which matches name.

    This method is deprecated, please use GetItemAttributes instead.

    Args:
      name: str The tag of the desired base attribute. For example, calling
          this method with name = 'rating' would search for a tag rating
          in the GBase namespace in the item attributes.

    Returns:
      The text contents of the item attribute, or none if the attribute was
      not found.
    """
    for attrib in self.item_attributes:
      if attrib.name == name:
        return attrib.text
    return None

  def AddItemAttribute(self, name, value, value_type=None, access=None):
    """Adds a new item attribute tag containing the value.

    Creates a new extension element in the GBase namespace to represent a
    Google Base item attribute.

    Args:
      name: str The tag name for the new attribute. This must be a valid xml
          tag name. The tag will be placed in the GBase namespace.
      value: str Contents for the item attribute
      value_type: str (optional) The type of data in the value, Examples: text
          float
      access: str (optional) Used to hide attributes. The attribute is not
          exposed in the snippets feed if access is set to 'private'.

    Returns:
      The newly created ItemAttribute, which is also appended to
      item_attributes.
    """
    new_attribute = ItemAttribute(name, text=value,
                                  text_type=value_type, access=access)
    self.item_attributes.append(new_attribute)
    return new_attribute

  def SetItemAttribute(self, name, value):
    """Changes an existing item attribute's value.

    Only the first attribute whose name matches is updated.
    """
    for attrib in self.item_attributes:
      if attrib.name == name:
        attrib.text = value
        return

  def RemoveItemAttribute(self, name):
    """Deletes the first extension element which matches name.

    Deletes the first extension element which matches name.
    """
    for i in xrange(len(self.item_attributes)):
      if self.item_attributes[i].name == name:
        del self.item_attributes[i]
        return

  # We need to overwrite _ConvertElementTreeToMember to add special logic to
  # convert custom attributes to members
  def _ConvertElementTreeToMember(self, child_tree):
    # Find the element's tag in this class's list of child members
    if self.__class__._children.has_key(child_tree.tag):
      member_name = self.__class__._children[child_tree.tag][0]
      member_class = self.__class__._children[child_tree.tag][1]
      # If the class member is supposed to contain a list, make sure the
      # matching member is set to a list, then append the new member
      # instance to the list.
      if isinstance(member_class, list):
        if getattr(self, member_name) is None:
          setattr(self, member_name, [])
        getattr(self, member_name).append(atom._CreateClassFromElementTree(
            member_class[0], child_tree))
      else:
        setattr(self, member_name,
                atom._CreateClassFromElementTree(member_class, child_tree))
    elif child_tree.tag.find('{%s}' % GBASE_NAMESPACE) == 0:
      # If this is in the gbase namespace, make it into an extension element.
      name = child_tree.tag[child_tree.tag.index('}')+1:]
      value = child_tree.text
      if child_tree.attrib.has_key('type'):
        value_type = child_tree.attrib['type']
      else:
        value_type = None
      attrib = self.AddItemAttribute(name, value, value_type)
      # Recursively capture nested children as sub-attributes.
      for sub in child_tree.getchildren():
        sub_name = sub.tag[sub.tag.index('}')+1:]
        sub_value = sub.text
        if sub.attrib.has_key('type'):
          sub_type = sub.attrib['type']
        else:
          sub_type = None
        attrib.AddItemAttribute(sub_name, sub_value, sub_type)
    else:
      atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

  # We need to overwrite _AddMembersToElementTree to add special logic to
  # convert custom members to XML nodes.
  def _AddMembersToElementTree(self, tree):
    # Convert the members of this class which are XML child nodes.
    # This uses the class's _children dictionary to find the members which
    # should become XML child nodes.
    member_node_names = [values[0] for tag, values in
                         self.__class__._children.iteritems()]
    for member_name in member_node_names:
      member = getattr(self, member_name)
      if member is None:
        pass
      elif isinstance(member, list):
        for instance in member:
          instance._BecomeChildElement(tree)
      else:
        member._BecomeChildElement(tree)
    # Convert the members of this class which are XML attributes.
    for xml_attribute, member_name in self.__class__._attributes.iteritems():
      member = getattr(self, member_name)
      if member is not None:
        tree.attrib[xml_attribute] = member
    # Convert all special custom item attributes to nodes
    for attribute in self.item_attributes:
      attribute._BecomeChildElement(tree)
    # Lastly, call the ExtensionContainers's _AddMembersToElementTree to
    # convert any extension attributes.
    atom.ExtensionContainer._AddMembersToElementTree(self, tree)
class ItemAttribute(ItemAttributeContainer):
  """An optional or user defined attribute for a GBase item.

  Google Base allows items to have custom attribute child nodes. These nodes
  have contents and a type attribute which tells Google Base whether the
  contents are text, a float value with units, etc. The Atom Text class has
  the same structure, so this class copies its child and attribute maps;
  note that it extends ItemAttributeContainer (not Text) so that attributes
  may themselves contain nested item attributes.
  """

  _namespace = GBASE_NAMESPACE
  _children = atom.Text._children.copy()
  _attributes = atom.Text._attributes.copy()
  # Map the XML 'access' attribute onto the access member.
  _attributes['access'] = 'access'

  def __init__(self, name, text_type=None, access=None, text=None,
               extension_elements=None, extension_attributes=None,
               item_attributes=None):
    """Constructor for a GBase item attribute

    Args:
      name: str The name of the attribute. Examples include
          price, color, make, model, pages, salary, etc.
      text_type: str (optional) The type associated with the text contents
      access: str (optional) If the access attribute is set to 'private', the
          attribute will not be included in the item's description in the
          snippets feed
      text: str (optional) The text data in the this element
      extension_elements: list (optional) A list of ExtensionElement
          instances
      extension_attributes: dict (optional) A dictionary of attribute
          value string pairs
      item_attributes: list (optional) Nested ItemAttribute children.
    """
    self.name = name
    self.type = text_type
    self.access = access
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.item_attributes = item_attributes or []

  def _BecomeChildElement(self, tree):
    # The tag is derived from self.name at serialization time, so create an
    # empty element first, attach it, then fill in the namespaced tag.
    new_child = ElementTree.Element('')
    tree.append(new_child)
    new_child.tag = '{%s}%s' % (self.__class__._namespace,
                                self.name)
    self._AddMembersToElementTree(new_child)

  def _ToElementTree(self):
    new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
                                               self.name))
    self._AddMembersToElementTree(new_tree)
    return new_tree
def ItemAttributeFromString(xml_string):
  """Parses an ItemAttribute from its raw XML string form."""
  tree = ElementTree.fromstring(xml_string)
  return _ItemAttributeFromElementTree(tree)
def _ItemAttributeFromElementTree(element_tree):
  """Converts an ElementTree node in the gbase namespace to an ItemAttribute.

  Returns None when the node is not in the gbase namespace or its local
  name is empty.
  """
  tag = element_tree.tag
  if not tag.startswith(GBASE_TEMPLATE % ''):
    return None
  attribute = ItemAttribute('')
  attribute._HarvestElementTree(element_tree)
  # The attribute name is the tag's local part (after the namespace brace).
  attribute.name = tag[tag.index('}') + 1:]
  if attribute.name:
    return attribute
  return None
class Label(atom.AtomBase):
  """The Google Base label element; the text member holds the label value."""

  _tag = 'label'
  _namespace = GBASE_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
               extension_attributes=None):
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def LabelFromString(xml_string):
  """Deserializes a Label from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(Label, xml_string)
  return parsed
class Thumbnail(atom.AtomBase):
  """The Google Base thumbnail element.

  Carries the thumbnail content as text plus optional width and height
  XML attributes.
  """

  _tag = 'thumbnail'
  _namespace = GMETA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # Map the XML 'width'/'height' attributes onto same-named members.
  _attributes['width'] = 'width'
  _attributes['height'] = 'height'

  def __init__(self, width=None, height=None, text=None,
               extension_elements=None, extension_attributes=None):
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.width = width
    self.height = height
def ThumbnailFromString(xml_string):
  """Deserializes a Thumbnail from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(Thumbnail, xml_string)
  return parsed
class ImageLink(atom.Text):
  """The Google Base image_link element.

  Holds the image link as text and may contain one or more thumbnail
  children from the metadata namespace.
  """

  _tag = 'image_link'
  _namespace = GBASE_NAMESPACE
  _children = atom.Text._children.copy()
  _attributes = atom.Text._attributes.copy()
  # List member: an image_link may carry several thumbnails.
  _children['{%s}thumbnail' % GMETA_NAMESPACE] = ('thumbnail', [Thumbnail])

  def __init__(self, thumbnail=None, text=None, extension_elements=None,
               text_type=None, extension_attributes=None):
    self.thumbnail = thumbnail or []
    self.text = text
    self.type = text_type
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def ImageLinkFromString(xml_string):
  """Deserializes an ImageLink from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(ImageLink, xml_string)
  return parsed
class ItemType(atom.Text):
  """The Google Base item_type element in the gbase namespace."""

  _tag = 'item_type'
  _namespace = GBASE_NAMESPACE
  _children = atom.Text._children.copy()
  _attributes = atom.Text._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
               text_type=None, extension_attributes=None):
    self.text = text
    self.type = text_type
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def ItemTypeFromString(xml_string):
  """Deserializes an ItemType from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(ItemType, xml_string)
  return parsed
class MetaItemType(ItemType):
  """The item_type element as found in the metadata namespace.

  Structurally identical to ItemType but lives in GMETA_NAMESPACE; used by
  GBaseItemTypeEntry.
  """
  _tag = 'item_type'
  _namespace = GMETA_NAMESPACE
  _children = ItemType._children.copy()
  _attributes = ItemType._attributes.copy()
def MetaItemTypeFromString(xml_string):
  """Deserializes a MetaItemType from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(MetaItemType, xml_string)
  return parsed
class Value(atom.AtomBase):
  """Metadata about common values for a given attribute

  A value is a child of an attribute which comes from the attributes feed.
  The value's text is a commonly used value paired with an attribute name
  and the value's count tells how often this value appears for the given
  attribute in the search results.
  """

  _tag = 'value'
  _namespace = GMETA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # Map the XML 'count' attribute onto the count member.
  _attributes['count'] = 'count'

  def __init__(self, count=None, text=None, extension_elements=None,
               extension_attributes=None):
    """Constructor for Attribute metadata element

    Args:
      count: str (optional) The number of times the value in text is given
          for the parent attribute.
      text: str (optional) The value which appears in the search results.
      extension_elements: list (optional) A list of ExtensionElement
          instances
      extension_attributes: dict (optional) A dictionary of attribute value
          string pairs
    """
    self.count = count
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def ValueFromString(xml_string):
  """Deserializes a Value from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(Value, xml_string)
  return parsed
class Attribute(atom.Text):
  """Metadata about an attribute from the attributes feed

  An entry from the attributes feed contains a list of attributes. Each
  attribute describes the attribute's type and count of the items which
  use the attribute.
  """

  _tag = 'attribute'
  _namespace = GMETA_NAMESPACE
  _children = atom.Text._children.copy()
  _attributes = atom.Text._attributes.copy()
  # Repeated value children plus count/name XML attributes.
  _children['{%s}value' % GMETA_NAMESPACE] = ('value', [Value])
  _attributes['count'] = 'count'
  _attributes['name'] = 'name'

  def __init__(self, name=None, attribute_type=None, count=None, value=None,
               text=None, extension_elements=None, extension_attributes=None):
    """Constructor for Attribute metadata element

    Args:
      name: str (optional) The name of the attribute
      attribute_type: str (optional) The type for the attribute. Examples:
          text, float, etc.
      count: str (optional) The number of times this attribute appears in
          the query results.
      value: list (optional) The values which are often used for this
          attribute.
      text: str (optional) The text contents of the XML for this attribute.
      extension_elements: list (optional) A list of ExtensionElement
          instances
      extension_attributes: dict (optional) A dictionary of attribute value
          string pairs
    """
    self.name = name
    self.type = attribute_type
    self.count = count
    self.value = value or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def AttributeFromString(xml_string):
  """Deserializes an Attribute from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(Attribute, xml_string)
  return parsed
class Attributes(atom.AtomBase):
  """A collection of Google Base metadata attributes"""

  _tag = 'attributes'
  _namespace = GMETA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # Contains repeated attribute children parsed as Attribute objects.
  _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])

  def __init__(self, attribute=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.attribute = attribute or []
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
class GBaseItem(ItemAttributeContainer, gdata.BatchEntry):
  """A Google Base flavor of an Atom Entry.

  Google Base items have required attributes, recommended attributes, and user
  defined attributes. The required attributes are stored in this class as
  members, and other attributes are stored as extension elements. You can
  access the recommended and user defined attributes by using
  AddItemAttribute, SetItemAttribute, FindItemAttribute, and
  RemoveItemAttribute.
  """

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.BatchEntry._children.copy()
  _attributes = gdata.BatchEntry._attributes.copy()
  # Base-specific children: repeated label elements and a single item_type.
  _children['{%s}label' % GBASE_NAMESPACE] = ('label', [Label])
  _children['{%s}item_type' % GBASE_NAMESPACE] = ('item_type', ItemType)

  def __init__(self, author=None, category=None, content=None,
               contributor=None, atom_id=None, link=None, published=None,
               rights=None, source=None, summary=None, title=None,
               updated=None, control=None, label=None, item_type=None,
               item_attributes=None, batch_operation=None, batch_id=None,
               batch_status=None, text=None, extension_elements=None,
               extension_attributes=None):
    self.author = author or []
    self.category = category or []
    self.content = content
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.published = published
    self.rights = rights
    self.source = source
    self.summary = summary
    self.title = title
    self.updated = updated
    self.control = control
    self.label = label or []
    self.item_type = item_type
    self.item_attributes = item_attributes or []
    self.batch_operation = batch_operation
    self.batch_id = batch_id
    self.batch_status = batch_status
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def GBaseItemFromString(xml_string):
  """Deserializes a GBaseItem from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseItem, xml_string)
  return parsed
class GBaseSnippet(GBaseItem):
  """An Atom entry from the Google Base snippets feed.

  Structurally identical to GBaseItem.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = GBaseItem._children.copy()
  _attributes = GBaseItem._attributes.copy()
def GBaseSnippetFromString(xml_string):
  """Deserializes a GBaseSnippet from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseSnippet, xml_string)
  return parsed
class GBaseAttributeEntry(gdata.GDataEntry):
  """An Atom Entry from the attributes feed"""

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # Repeated attribute children parsed as Attribute objects.
  _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])

  def __init__(self, author=None, category=None, content=None,
               contributor=None, atom_id=None, link=None, published=None,
               rights=None, source=None, summary=None, title=None,
               updated=None, label=None, attribute=None, control=None,
               text=None, extension_elements=None, extension_attributes=None):
    self.author = author or []
    self.category = category or []
    self.content = content
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.published = published
    self.rights = rights
    self.source = source
    self.summary = summary
    self.control = control
    self.title = title
    self.updated = updated
    self.label = label or []
    self.attribute = attribute or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def GBaseAttributeEntryFromString(xml_string):
  """Deserializes a GBaseAttributeEntry from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseAttributeEntry, xml_string)
  return parsed
class GBaseItemTypeEntry(gdata.GDataEntry):
  """An Atom entry from the item types feed

  These entries contain a list of attributes which are stored in one
  XML node called attributes. This class simplifies the data structure
  by treating attributes as a list of attribute instances.

  Note that the item_type for an item type entry is in the Google Base meta
  namespace as opposed to item_types encountered in other feeds.
  """

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # Both the attributes container and bare attribute children are recognized;
  # item_type here uses the meta-namespace variant (MetaItemType).
  _children['{%s}attributes' % GMETA_NAMESPACE] = ('attributes', Attributes)
  _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])
  _children['{%s}item_type' % GMETA_NAMESPACE] = ('item_type', MetaItemType)

  def __init__(self, author=None, category=None, content=None,
               contributor=None, atom_id=None, link=None, published=None,
               rights=None, source=None, summary=None, title=None,
               updated=None, label=None, item_type=None, control=None,
               attribute=None, attributes=None, text=None,
               extension_elements=None, extension_attributes=None):
    self.author = author or []
    self.category = category or []
    self.content = content
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.published = published
    self.rights = rights
    self.source = source
    self.summary = summary
    self.title = title
    self.updated = updated
    self.control = control
    self.label = label or []
    self.item_type = item_type
    self.attributes = attributes
    self.attribute = attribute or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def GBaseItemTypeEntryFromString(xml_string):
  """Deserializes a GBaseItemTypeEntry from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseItemTypeEntry, xml_string)
  return parsed
class GBaseItemFeed(gdata.BatchFeed):
  """A feed containing Google Base Items"""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.BatchFeed._children.copy()
  _attributes = gdata.BatchFeed._attributes.copy()
  # Entries parse as GBaseItem instead of the generic entry class.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItem])
def GBaseItemFeedFromString(xml_string):
  """Deserializes a GBaseItemFeed from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseItemFeed, xml_string)
  return parsed
class GBaseSnippetFeed(gdata.GDataFeed):
  """A feed containing Google Base Snippets"""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Entries parse as GBaseSnippet instead of the generic entry class.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseSnippet])
def GBaseSnippetFeedFromString(xml_string):
  """Deserializes a GBaseSnippetFeed from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseSnippetFeed, xml_string)
  return parsed
class GBaseAttributesFeed(gdata.GDataFeed):
  """A feed containing Google Base Attributes

  A query sent to the attributes feed will return a feed of
  attributes which are present in the items that match the
  query.
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Entries parse as GBaseAttributeEntry instead of the generic entry class.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [GBaseAttributeEntry])
def GBaseAttributesFeedFromString(xml_string):
  """Deserializes a GBaseAttributesFeed from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseAttributesFeed, xml_string)
  return parsed
class GBaseLocalesFeed(gdata.GDataFeed):
  """The locales feed from Google Base.

  This read-only feed defines the permitted locales for Google Base. The
  locale value identifies the language, currency, and date formats used in a
  feed.
  """
  # Uses the generic GDataFeed parsing; no Base-specific children are added.
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
def GBaseLocalesFeedFromString(xml_string):
  """Deserializes a GBaseLocalesFeed from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseLocalesFeed, xml_string)
  return parsed
class GBaseItemTypesFeed(gdata.GDataFeed):
  """A feed from the Google Base item types feed"""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Entries parse as GBaseItemTypeEntry instead of the generic entry class.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItemTypeEntry])
def GBaseItemTypesFeedFromString(xml_string):
  """Deserializes a GBaseItemTypesFeed from its XML string representation."""
  parsed = atom.CreateClassFromXMLString(GBaseItemTypesFeed, xml_string)
  return parsed

View File

@ -0,0 +1,256 @@
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GBaseService extends the GDataService to streamline Google Base operations.
GBaseService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import urllib
import gdata
import atom.service
import gdata.service
import gdata.base
import atom
# URL to which all batch requests are sent.
BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch'
class Error(Exception):
  """Base exception for errors raised by this Google Base service module."""
class RequestError(Error):
  """Raised for failed requests; specializes the module's Error base."""
class GBaseService(gdata.service.GDataService):
"""Client for the Google Base service."""
  def __init__(self, email=None, password=None, source=None,
               server='base.google.com', api_key=None, additional_headers=None,
               handler=None, **kwargs):
    """Creates a client for the Google Base service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'base.google.com'.
      api_key: string (optional) The Google Base API key to use.
      additional_headers: dict (optional) Extra HTTP headers passed through to
          gdata.service.GDataService; also used to store the X-Google-Key
          header when api_key is set.
      handler: (optional) Passed through to gdata.service.GDataService --
          TODO confirm its semantics against that constructor.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='gbase', source=source,
        server=server, additional_headers=additional_headers, handler=handler,
        **kwargs)
    # Assigning api_key goes through the property below, which records the
    # key in the X-Google-Key request header.
    self.api_key = api_key
def _SetAPIKey(self, api_key):
if not isinstance(self.additional_headers, dict):
self.additional_headers = {}
self.additional_headers['X-Google-Key'] = api_key
def __SetAPIKey(self, api_key):
self._SetAPIKey(api_key)
def _GetAPIKey(self):
if 'X-Google-Key' not in self.additional_headers:
return None
else:
return self.additional_headers['X-Google-Key']
def __GetAPIKey(self):
return self._GetAPIKey()
api_key = property(__GetAPIKey, __SetAPIKey,
doc="""Get or set the API key to be included in all requests.""")
def Query(self, uri, converter=None):
"""Performs a style query and returns a resulting feed or entry.
Args:
uri: string The full URI which be queried. Examples include
'/base/feeds/snippets?bq=digital+camera',
'http://www.google.com/base/feeds/snippets?bq=digital+camera'
'/base/feeds/items'
I recommend creating a URI using a query class.
converter: func (optional) A function which will be executed on the
server's response. Examples include GBaseItemFromString, etc.
Returns:
If converter was specified, returns the results of calling converter on
the server's response. If converter was not specified, and the result
was an Atom Entry, returns a GBaseItem, by default, the method returns
the result of calling gdata.service's Get method.
"""
result = self.Get(uri, converter=converter)
if converter:
return result
elif isinstance(result, atom.Entry):
return gdata.base.GBaseItemFromString(result.ToString())
return result
def QuerySnippetsFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseSnippetFeedFromString)
def QueryItemsFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemFeedFromString)
def QueryAttributesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseAttributesFeedFromString)
def QueryItemTypesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemTypesFeedFromString)
def QueryLocalesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseLocalesFeedFromString)
def GetItem(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemFromString)
def GetSnippet(self, uri):
return self.Get(uri, converter=gdata.base.GBaseSnippetFromString)
def GetAttribute(self, uri):
return self.Get(uri, converter=gdata.base.GBaseAttributeEntryFromString)
def GetItemType(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemTypeEntryFromString)
def GetLocale(self, uri):
return self.Get(uri, converter=gdata.base.GDataEntryFromString)
def InsertItem(self, new_item, url_params=None, escape_params=True,
converter=None):
"""Adds an item to Google Base.
Args:
new_item: atom.Entry or subclass A new item which is to be added to
Google Base.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
GBaseItemFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a GBaseItem.
"""
response = self.Post(new_item, '/base/feeds/items', url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return gdata.base.GBaseItemFromString(response.ToString())
return response
def DeleteItem(self, item_id, url_params=None, escape_params=True):
"""Removes an item with the specified ID from Google Base.
Args:
item_id: string The ID of the item to be deleted. Example:
'http://www.google.com/base/feeds/items/13185446517496042648'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
True if the delete succeeded.
"""
return self.Delete('%s' % (item_id[len('http://www.google.com'):],),
url_params=url_params, escape_params=escape_params)
def UpdateItem(self, item_id, updated_item, url_params=None,
escape_params=True,
converter=gdata.base.GBaseItemFromString):
"""Updates an existing item.
Args:
item_id: string The ID of the item to be updated. Example:
'http://www.google.com/base/feeds/items/13185446517496042648'
updated_item: atom.Entry, subclass, or string, containing
the Atom Entry which will replace the base item which is
stored at the item_id.
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
GBaseItemFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a GBaseItem.
"""
response = self.Put(updated_item,
item_id, url_params=url_params, escape_params=escape_params,
converter=converter)
if not converter and isinstance(response, atom.Entry):
return gdata.base.GBaseItemFromString(response.ToString())
return response
def ExecuteBatch(self, batch_feed,
converter=gdata.base.GBaseItemFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which
contain the desired CRUD operation and any necessary entry data.
converter: Function (optional) Function to be executed on the server's
response. This function should take one string as a parameter. The
default value is GBaseItemFeedFromString which will turn the result
into a gdata.base.GBaseItem object.
Returns:
A gdata.BatchFeed containing the results.
"""
return self.Post(batch_feed, BASE_BATCH_URL, converter=converter)
class BaseQuery(gdata.service.Query):
  """Query builder for Google Base feeds; exposes the 'bq' URL parameter."""

  def _GetBaseQuery(self):
    # 'bq' carries the Google Base query expression in the request URL.
    return self['bq']

  def _SetBaseQuery(self, base_query):
    self['bq'] = base_query

  bq = property(_GetBaseQuery, _SetBaseQuery,
      doc="""The bq query parameter""")

View File

@ -0,0 +1,202 @@
#!/usr/bin/python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Blogger."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import atom
import gdata
import re
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0'
class BloggerEntry(gdata.GDataEntry):
  """Shared convenience accessors inherited by all Blogger entry types."""

  blog_name_pattern = re.compile('(http://)(\w*)')
  blog_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
  blog_id2_pattern = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')

  def GetBlogId(self):
    """Extracts the Blogger id of this blog.

    This method is useful when constructing URLs by hand, since the blog id
    appears in many Blogger operation URLs.  The value returned here is a
    component of the entry's Atom id, not the Atom id itself.

    Returns:
      The blog's unique id as a string, or None if the entry has no Atom id
      text.
    """
    atom_id = self.id.text
    if not atom_id:
      return None
    match = self.blog_id_pattern.match(atom_id)
    if match is not None:
      return match.group(2)
    # Fall back to the alternate 'user-N.blog-M' id form.
    return self.blog_id2_pattern.match(atom_id).group(2)

  def GetBlogName(self):
    """Finds the name of this blog as used in the 'alternate' URL.

    An alternate URL has the form 'http://blogName.blogspot.com/'; for such
    a link this method returns 'blogName'.

    Returns:
      The blog's URL name component as a string, or None when no alternate
      link is present.
    """
    for candidate in self.link:
      if candidate.rel == 'alternate':
        return self.blog_name_pattern.match(candidate.href).group(2)
    return None
class BlogEntry(BloggerEntry):
  """Describes a blog entry in the feed listing a user's blogs."""
def BlogEntryFromString(xml_string):
  """Deserializes xml_string into a BlogEntry instance."""
  parsed_entry = atom.CreateClassFromXMLString(BlogEntry, xml_string)
  return parsed_entry
class BlogFeed(gdata.GDataFeed):
  """Describes a feed of a user's blogs."""
  _children = gdata.GDataFeed._children.copy()
  # Parse atom:entry children as BlogEntry rather than the generic entry type.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogEntry])
def BlogFeedFromString(xml_string):
  """Deserializes xml_string into a BlogFeed instance."""
  parsed_feed = atom.CreateClassFromXMLString(BlogFeed, xml_string)
  return parsed_feed
class BlogPostEntry(BloggerEntry):
  """Describes a blog post entry in the feed of a blog's posts."""

  post_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')

  def AddLabel(self, label):
    """Adds a label to the blog post.

    The label is represented by an Atom category element, so calling this is
    shorthand for appending a new atom.Category object.

    Args:
      label: str
    """
    new_category = atom.Category(scheme=LABEL_SCHEME, term=label)
    self.category.append(new_category)

  def GetPostId(self):
    """Extracts the postID string from the entry's Atom id.

    Returns: A string of digits which identify this post within the blog, or
        None when the entry has no Atom id text.
    """
    atom_id = self.id.text
    if not atom_id:
      return None
    return self.post_id_pattern.match(atom_id).group(4)
def BlogPostEntryFromString(xml_string):
  """Deserializes xml_string into a BlogPostEntry instance."""
  parsed_entry = atom.CreateClassFromXMLString(BlogPostEntry, xml_string)
  return parsed_entry
class BlogPostFeed(gdata.GDataFeed):
  """Describes a feed of a blog's posts."""
  _children = gdata.GDataFeed._children.copy()
  # Parse atom:entry children as BlogPostEntry rather than the generic type.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogPostEntry])
def BlogPostFeedFromString(xml_string):
  """Deserializes xml_string into a BlogPostFeed instance."""
  parsed_feed = atom.CreateClassFromXMLString(BlogPostFeed, xml_string)
  return parsed_feed
class InReplyTo(atom.AtomBase):
  """Represents the thr:in-reply-to element, which links a comment entry to
  the post it replies to (thread namespace: purl.org/syndication/thread/1.0).
  """
  _tag = 'in-reply-to'
  _namespace = THR_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  # XML attribute name -> Python attribute name mappings.
  _attributes['href'] = 'href'
  _attributes['ref'] = 'ref'
  _attributes['source'] = 'source'
  _attributes['type'] = 'type'

  def __init__(self, href=None, ref=None, source=None, type=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructs an InReplyTo element.

    Args:
      href: str (optional) Value for the href XML attribute.
      ref: str (optional) Value for the ref XML attribute.
      source: str (optional) Value for the source XML attribute.
      type: str (optional) Value for the type XML attribute.
      extension_elements: list (optional) Unrecognized child elements.
      extension_attributes: dict (optional) Unrecognized XML attributes.
      text: str (optional) The element's text content.
    """
    # Note: attributes are assigned directly instead of delegating to
    # atom.AtomBase.__init__.
    self.href = href
    self.ref = ref
    self.source = source
    self.type = type
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def InReplyToFromString(xml_string):
  """Deserializes xml_string into an InReplyTo instance."""
  parsed_element = atom.CreateClassFromXMLString(InReplyTo, xml_string)
  return parsed_element
class CommentEntry(BloggerEntry):
  """Describes a blog post comment entry in the feed of a blog post's
  comments."""

  _children = BloggerEntry._children.copy()
  # Parse the thr:in-reply-to extension element into the in_reply_to field.
  _children['{%s}in-reply-to' % THR_NAMESPACE] = ('in_reply_to', InReplyTo)

  # The comment id is the trailing run of word characters after the last '-'
  # in the entry's Atom id.
  comment_id_pattern = re.compile('.*-(\w*)$')

  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      in_reply_to=None, extension_elements=None, extension_attributes=None,
      text=None):
    """Constructs a comment entry.

    All arguments are forwarded unchanged to BloggerEntry.__init__ except
    in_reply_to, which is stored directly on the instance.
    """
    BloggerEntry.__init__(self, author=author, category=category,
        content=content, contributor=contributor, atom_id=atom_id, link=link,
        published=published, rights=rights, source=source, summary=summary,
        control=control, title=title, updated=updated,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
    self.in_reply_to = in_reply_to

  def GetCommentId(self):
    """Extracts the commentID string from the entry's Atom id.

    Returns: A string of digits which identify this comment, or None when
        the entry has no Atom id text.
    """
    if self.id.text:
      return self.comment_id_pattern.match(self.id.text).group(1)
    return None
def CommentEntryFromString(xml_string):
  """Deserializes xml_string into a CommentEntry instance."""
  parsed_entry = atom.CreateClassFromXMLString(CommentEntry, xml_string)
  return parsed_entry
class CommentFeed(gdata.GDataFeed):
  """Describes a feed of a blog post's comments."""
  _children = gdata.GDataFeed._children.copy()
  # Parse atom:entry children as CommentEntry rather than the generic type.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CommentEntry])
def CommentFeedFromString(xml_string):
  """Deserializes xml_string into a CommentFeed instance."""
  parsed_feed = atom.CreateClassFromXMLString(CommentFeed, xml_string)
  return parsed_feed

View File

@ -0,0 +1,175 @@
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a client to communicate with the Blogger servers.
For documentation on the Blogger API, see:
http://code.google.com/apis/blogger/
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import gdata.client
import gdata.gauth
import gdata.blogger.data
import atom.data
import atom.http_core
# List user's blogs, takes a user ID, or 'default'.
BLOGS_URL = 'http://www.blogger.com/feeds/%s/blogs'
# Takes a blog ID.
BLOG_POST_URL = 'http://www.blogger.com/feeds/%s/posts/default'
# Takes a blog ID.
BLOG_PAGE_URL = 'http://www.blogger.com/feeds/%s/pages/default'
# Takes a blog ID and post ID.
BLOG_POST_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/%s/comments/default'
# Takes a blog ID.
BLOG_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/comments/default'
# Takes a blog ID.
BLOG_ARCHIVE_URL = 'http://www.blogger.com/feeds/%s/archive/full'
class BloggerClient(gdata.client.GDClient):
  """Client for the Blogger v2 API.

  Provides feed retrieval and CRUD helpers for blogs, posts, pages, and
  comments.  Each lower_case method has a CamelCase alias for compatibility
  with the older naming style.
  """
  api_version = '2'
  auth_service = 'blogger'
  auth_scopes = gdata.gauth.AUTH_SCOPES['blogger']

  def get_blogs(self, user_id='default', auth_token=None,
                desired_class=gdata.blogger.data.BlogFeed, **kwargs):
    """Retrieves the feed of blogs for user_id ('default' = current user)."""
    return self.get_feed(BLOGS_URL % user_id, auth_token=auth_token,
                         desired_class=desired_class, **kwargs)

  GetBlogs = get_blogs

  def get_posts(self, blog_id, auth_token=None,
                desired_class=gdata.blogger.data.BlogPostFeed, query=None,
                **kwargs):
    """Retrieves the feed of posts belonging to the given blog."""
    return self.get_feed(BLOG_POST_URL % blog_id, auth_token=auth_token,
                         desired_class=desired_class, query=query, **kwargs)

  GetPosts = get_posts

  def get_pages(self, blog_id, auth_token=None,
                desired_class=gdata.blogger.data.BlogPageFeed, query=None,
                **kwargs):
    """Retrieves the feed of static pages belonging to the given blog."""
    return self.get_feed(BLOG_PAGE_URL % blog_id, auth_token=auth_token,
                         desired_class=desired_class, query=query, **kwargs)

  GetPages = get_pages

  def get_post_comments(self, blog_id, post_id, auth_token=None,
                        desired_class=gdata.blogger.data.CommentFeed,
                        query=None, **kwargs):
    """Retrieves the feed of comments on one particular post."""
    return self.get_feed(BLOG_POST_COMMENTS_URL % (blog_id, post_id),
                         auth_token=auth_token, desired_class=desired_class,
                         query=query, **kwargs)

  GetPostComments = get_post_comments

  def get_blog_comments(self, blog_id, auth_token=None,
                        desired_class=gdata.blogger.data.CommentFeed,
                        query=None, **kwargs):
    """Retrieves the feed of all comments across the given blog."""
    return self.get_feed(BLOG_COMMENTS_URL % blog_id, auth_token=auth_token,
                         desired_class=desired_class, query=query, **kwargs)

  GetBlogComments = get_blog_comments

  def get_blog_archive(self, blog_id, auth_token=None, **kwargs):
    """Retrieves the full export archive feed for the given blog."""
    return self.get_feed(BLOG_ARCHIVE_URL % blog_id, auth_token=auth_token,
                         **kwargs)

  GetBlogArchive = get_blog_archive

  def add_post(self, blog_id, title, body, labels=None, draft=False,
               auth_token=None, title_type='text', body_type='html', **kwargs):
    """Creates a new post on the given blog.

    Args:
      blog_id: str The blog's id as used in feed URLs.
      title: str Title for the new post.
      body: str Content for the new post.
      labels: list of str (optional) Labels to attach to the post.
      draft: bool (optional) If True, the post is created unpublished.
      auth_token: (optional) Authorization token for the request.
      title_type: str (optional) MIME subtype of the title (default 'text').
      body_type: str (optional) MIME subtype of the body (default 'html').

    Returns:
      The server's response parsed by self.post.
    """
    # Construct an atom Entry for the blog post to be sent to the server.
    new_entry = gdata.blogger.data.BlogPost(
        title=atom.data.Title(text=title, type=title_type),
        content=atom.data.Content(text=body, type=body_type))
    if labels:
      for label in labels:
        new_entry.add_label(label)
    if draft:
      # app:control/app:draft marks the post as unpublished.
      new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
    return self.post(new_entry, BLOG_POST_URL % blog_id,
                     auth_token=auth_token, **kwargs)

  AddPost = add_post

  def add_page(self, blog_id, title, body, draft=False, auth_token=None,
               title_type='text', body_type='html', **kwargs):
    """Creates a new static page on the given blog.

    Args mirror add_post, except pages do not carry labels.
    """
    new_entry = gdata.blogger.data.BlogPage(
        title=atom.data.Title(text=title, type=title_type),
        content=atom.data.Content(text=body, type=body_type))
    if draft:
      new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
    return self.post(new_entry, BLOG_PAGE_URL % blog_id,
                     auth_token=auth_token, **kwargs)

  AddPage = add_page

  def add_comment(self, blog_id, post_id, body, auth_token=None,
                  title_type='text', body_type='html', **kwargs):
    """Creates a new comment on the given blog post."""
    new_entry = gdata.blogger.data.Comment(
        content=atom.data.Content(text=body, type=body_type))
    return self.post(new_entry, BLOG_POST_COMMENTS_URL % (blog_id, post_id),
                     auth_token=auth_token, **kwargs)

  AddComment = add_comment

  def update(self, entry, auth_token=None, **kwargs):
    """Sends an updated entry to the server.

    The Blogger API does not currently support ETags, so the entry's ETag is
    removed before performing the update and restored afterwards.  The
    restore happens in a finally block so the entry is never left with
    etag == None if the request raises (fixes the previously-noted TODO).
    """
    old_etag = entry.etag
    entry.etag = None
    try:
      return gdata.client.GDClient.update(self, entry,
                                          auth_token=auth_token, **kwargs)
    finally:
      entry.etag = old_etag

  Update = update

  def delete(self, entry_or_uri, auth_token=None, **kwargs):
    """Deletes an entry, or the resource at a string/Uri location.

    As in update(), the entry's ETag is cleared for the request and restored
    in a finally block so an exception cannot leave it as None.
    """
    if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)):
      return gdata.client.GDClient.delete(self, entry_or_uri,
                                          auth_token=auth_token, **kwargs)
    # The Blogger API does not currently support ETags, so remove the ETag
    # before performing a delete and restore it afterwards.
    old_etag = entry_or_uri.etag
    entry_or_uri.etag = None
    try:
      return gdata.client.GDClient.delete(self, entry_or_uri,
                                          auth_token=auth_token, **kwargs)
    finally:
      entry_or_uri.etag = old_etag

  Delete = delete
class Query(gdata.client.Query):
  """Blogger query that adds the 'orderby' URL parameter."""

  def __init__(self, order_by=None, **kwargs):
    """Creates a query; order_by becomes the 'orderby' request parameter."""
    gdata.client.Query.__init__(self, **kwargs)
    self.order_by = order_by

  def modify_request(self, http_request):
    # Append 'orderby' first, then let the base class add its parameters.
    gdata.client._add_query_param('orderby', self.order_by, http_request)
    gdata.client.Query.modify_request(self, http_request)

  ModifyRequest = modify_request

View File

@ -0,0 +1,168 @@
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Blogger API."""
__author__ = 'j.s@google.com (Jeff Scudder)'
import re
import urlparse
import atom.core
import gdata.data
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s'
BLOG_NAME_PATTERN = re.compile('(http://)(\w*)')
BLOG_ID_PATTERN = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
BLOG_ID2_PATTERN = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
POST_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
PAGE_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.page-)(\w*)')
COMMENT_ID_PATTERN = re.compile('.*-(\w*)$')
class BloggerEntry(gdata.data.GDEntry):
  """Shared convenience accessors inherited by all Blogger entry types."""

  def get_blog_id(self):
    """Extracts the Blogger id of this blog.

    Useful when constructing URLs by hand, since the blog id appears in many
    Blogger operation URLs.  The value returned is a component of the Atom
    id, not the Atom id itself.

    Returns:
      The blog's unique id as a string, or None when the entry has no Atom
      id text.
    """
    atom_id = self.id.text
    if not atom_id:
      return None
    match = BLOG_ID_PATTERN.match(atom_id)
    if match is not None:
      return match.group(2)
    # Fall back to the alternate 'user-N.blog-M' id form.
    return BLOG_ID2_PATTERN.match(atom_id).group(2)

  GetBlogId = get_blog_id

  def get_blog_name(self):
    """Finds the name of this blog as used in the 'alternate' URL.

    An alternate URL has the form 'http://blogName.blogspot.com/'; for such
    a link this method returns 'blogName'.

    Returns:
      The blog's URL name component as a string, or None when no alternate
      link exists.
    """
    for candidate in self.link:
      if candidate.rel == 'alternate':
        host = urlparse.urlparse(candidate.href)[1]
        return host.split(".", 1)[0]
    return None

  GetBlogName = get_blog_name
class Blog(BloggerEntry):
  """Represents a blog which belongs to the user."""
class BlogFeed(gdata.data.GDFeed):
  """Describes a feed of the user's blogs."""
  # Feed entries parse as Blog objects.
  entry = [Blog]
class BlogPost(BloggerEntry):
  """Represents a single post on a blog."""

  def add_label(self, label):
    """Adds a label to the blog post.

    The label is represented by an Atom category element, so calling this is
    shorthand for appending a new atom.data.Category object.

    Args:
      label: str
    """
    new_category = atom.data.Category(scheme=LABEL_SCHEME, term=label)
    self.category.append(new_category)

  AddLabel = add_label

  def get_post_id(self):
    """Extracts the postID string from the entry's Atom id.

    Returns: A string of digits which identify this post within the blog, or
        None when the entry has no Atom id text.
    """
    atom_id = self.id.text
    if not atom_id:
      return None
    return POST_ID_PATTERN.match(atom_id).group(4)

  GetPostId = get_post_id
class BlogPostFeed(gdata.data.GDFeed):
  """Describes a feed of a blog's posts."""
  # Feed entries parse as BlogPost objects.
  entry = [BlogPost]
class BlogPage(BloggerEntry):
  """Represents a single page on a blog."""

  def get_page_id(self):
    """Extracts the pageID string from the entry's Atom id.

    Returns: A string of digits which identify this page within the blog,
        or None when the entry has no Atom id text.
    """
    atom_id = self.id.text
    if not atom_id:
      return None
    return PAGE_ID_PATTERN.match(atom_id).group(4)

  GetPageId = get_page_id
class BlogPageFeed(gdata.data.GDFeed):
  """Describes a feed of a blog's static pages."""
  # Feed entries parse as BlogPage objects.
  entry = [BlogPage]
class InReplyTo(atom.core.XmlElement):
  """Represents the thr:in-reply-to element linking a comment to its post."""
  _qname = THR_TEMPLATE % 'in-reply-to'
  # XML attributes exposed as same-named Python attributes.
  href = 'href'
  ref = 'ref'
  source = 'source'
  type = 'type'
class Comment(BloggerEntry):
  """Blog post comment entry in a feed listing comments on a post or blog."""

  # thr:in-reply-to child element.
  in_reply_to = InReplyTo

  def get_comment_id(self):
    """Extracts the commentID string from the entry's Atom id.

    Returns: A string of digits which identify this comment, or None when
        the entry has no Atom id text.
    """
    atom_id = self.id.text
    if not atom_id:
      return None
    return COMMENT_ID_PATTERN.match(atom_id).group(1)

  GetCommentId = get_comment_id
class CommentFeed(gdata.data.GDFeed):
  """Describes a feed of comments on a post or blog."""
  # Feed entries parse as Comment objects.
  entry = [Comment]

View File

@ -0,0 +1,142 @@
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to interact with the Blogger server."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import gdata.service
import gdata.blogger
class BloggerService(gdata.service.GDataService):
  """GData v1 service client for Blogger feeds and entries."""

  def __init__(self, email=None, password=None, source=None,
               server='www.blogger.com', **kwargs):
    """Creates a client for the Blogger service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'www.blogger.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='blogger', source=source,
        server=server, **kwargs)

  def GetBlogFeed(self, uri=None):
    """Retrieve a list of the blogs which the current user may manage."""
    target = uri or '/feeds/default/blogs'
    return self.Get(target, converter=gdata.blogger.BlogFeedFromString)

  def GetBlogCommentFeed(self, blog_id=None, uri=None):
    """Retrieve a list of the comments for this blog."""
    target = uri
    if blog_id:
      target = '/feeds/%s/comments/default' % blog_id
    return self.Get(target, converter=gdata.blogger.CommentFeedFromString)

  def GetBlogPostFeed(self, blog_id=None, uri=None):
    """Retrieve the feed of posts for a blog, located by blog_id or uri."""
    target = uri
    if blog_id:
      target = '/feeds/%s/posts/default' % blog_id
    return self.Get(target, converter=gdata.blogger.BlogPostFeedFromString)

  def GetPostCommentFeed(self, blog_id=None, post_id=None, uri=None):
    """Retrieve a list of the comments for this particular blog post."""
    target = uri
    if blog_id and post_id:
      target = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
    return self.Get(target, converter=gdata.blogger.CommentFeedFromString)

  def AddPost(self, entry, blog_id=None, uri=None):
    """Creates a new blog post by posting entry to the blog's posts feed."""
    target = uri
    if blog_id:
      target = '/feeds/%s/posts/default' % blog_id
    return self.Post(entry, target,
                     converter=gdata.blogger.BlogPostEntryFromString)

  def UpdatePost(self, entry, uri=None):
    """Replaces a blog post; the target defaults to the entry's edit link."""
    target = uri or entry.GetEditLink().href
    return self.Put(entry, target,
                    converter=gdata.blogger.BlogPostEntryFromString)

  def DeletePost(self, entry=None, uri=None):
    """Removes a blog post, located by uri or the entry's edit link."""
    target = uri or entry.GetEditLink().href
    return self.Delete(target)

  def AddComment(self, comment_entry, blog_id=None, post_id=None, uri=None):
    """Adds a new comment to the specified blog post."""
    target = uri
    if blog_id and post_id:
      target = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
    return self.Post(comment_entry, target,
                     converter=gdata.blogger.CommentEntryFromString)

  def DeleteComment(self, entry=None, uri=None):
    """Removes a comment, located by uri or the entry's edit link."""
    target = uri or entry.GetEditLink().href
    return self.Delete(target)
class BlogQuery(gdata.service.Query):

  def __init__(self, feed=None, params=None, categories=None, blog_id=None):
    """Constructs a query object for the list of a user's Blogger blogs.

    Args:
      feed: str (optional) The beginning of the URL to be queried.  When
          omitted it is derived from blog_id, falling back to the default
          '/feeds/default/blogs'.
      params: dict (optional)
      categories: list (optional)
      blog_id: str (optional)
    """
    if not feed:
      if blog_id:
        feed = '/feeds/default/blogs/%s' % blog_id
      else:
        feed = '/feeds/default/blogs'
    gdata.service.Query.__init__(self, feed=feed, params=params,
                                 categories=categories)
class BlogPostQuery(gdata.service.Query):

  def __init__(self, feed=None, params=None, categories=None, blog_id=None,
               post_id=None):
    """Query for a blog's posts feed, or one post when post_id is given."""
    if not feed and blog_id:
      if post_id:
        feed = '/feeds/%s/posts/default/%s' % (blog_id, post_id)
      else:
        feed = '/feeds/%s/posts/default' % blog_id
    gdata.service.Query.__init__(self, feed=feed, params=params,
                                 categories=categories)
class BlogCommentQuery(gdata.service.Query):

  def __init__(self, feed=None, params=None, categories=None, blog_id=None,
               post_id=None, comment_id=None):
    """Query for blog comments: one comment, a post's comments, or all."""
    if not feed and blog_id:
      if comment_id:
        feed = '/feeds/%s/comments/default/%s' % (blog_id, comment_id)
      elif post_id:
        feed = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
      else:
        feed = '/feeds/%s/comments/default' % blog_id
    gdata.service.Query.__init__(self, feed=feed, params=params,
                                 categories=categories)

View File

@ -0,0 +1,473 @@
#!/usr/bin/python
"""
Data Models for books.service
All classes can be instantiated from an xml string using their FromString
class method.
Notes:
* Book.title displays the first dc:title because the returned XML
repeats that datum as atom:title.
  * There is an undocumented gbs:openAccess element that is not parsed.
"""
__author__ = "James Sams <sams.james@gmail.com>"
__copyright__ = "Apache License v2.0"
import atom
import gdata
BOOK_SEARCH_NAMESPACE = 'http://schemas.google.com/books/2008'
DC_NAMESPACE = 'http://purl.org/dc/terms'
ANNOTATION_REL = "http://schemas.google.com/books/2008/annotation"
INFO_REL = "http://schemas.google.com/books/2008/info"
LABEL_SCHEME = "http://schemas.google.com/books/2008/labels"
PREVIEW_REL = "http://schemas.google.com/books/2008/preview"
THUMBNAIL_REL = "http://schemas.google.com/books/2008/thumbnail"
FULL_VIEW = "http://schemas.google.com/books/2008#view_all_pages"
PARTIAL_VIEW = "http://schemas.google.com/books/2008#view_partial"
NO_VIEW = "http://schemas.google.com/books/2008#view_no_pages"
UNKNOWN_VIEW = "http://schemas.google.com/books/2008#view_unknown"
EMBEDDABLE = "http://schemas.google.com/books/2008#embeddable"
NOT_EMBEDDABLE = "http://schemas.google.com/books/2008#not_embeddable"
class _AtomFromString(atom.AtomBase):
  """Base class adding a FromString factory to each Book Search model."""

  #@classmethod
  def FromString(cls, s):
    """Parses the XML string s into an instance of cls."""
    return atom.CreateClassFromXMLString(cls, s)
  # The classmethod() call form (rather than the decorator suggested by the
  # commented-out @classmethod above) is kept for pre-2.4 Python
  # compatibility.
  FromString = classmethod(FromString)
class Creator(_AtomFromString):
  """
  The <dc:creator> element identifies an author-or more generally, an entity
  responsible for creating the volume in question. Examples of a creator
  include a person, an organization, or a service. In the case of
  anthologies, proceedings, or other edited works, this field may be used to
  indicate editors or other entities responsible for collecting the volume's
  contents.

  This element appears as a child of <entry>. If there are multiple authors or
  contributors to the book, there may be multiple <dc:creator> elements in the
  volume entry (one for each creator or contributor).
  """
  _tag = 'creator'           # XML local name
  _namespace = DC_NAMESPACE  # Dublin Core terms namespace
class Date(_AtomFromString): #iso 8601 / W3CDTF profile
  """
  The <dc:date> element indicates the publication date of the specific volume
  in question. If the book is a reprint, this is the reprint date, not the
  original publication date. The date is encoded according to the ISO-8601
  standard (and more specifically, the W3CDTF profile).

  The <dc:date> element can appear only as a child of <entry>.

  Usually only the year or the year and the month are given.

  YYYY-MM-DDThh:mm:ssTZD  TZD = -hh:mm or +hh:mm
  """
  _tag = 'date'              # XML local name
  _namespace = DC_NAMESPACE  # Dublin Core terms namespace
class Description(_AtomFromString):
  """
  The <dc:description> element includes text that describes a book or book
  result. In a search result feed, this may be a search result "snippet" that
  contains the words around the user's search term. For a single volume feed,
  this element may contain a synopsis of the book.

  The <dc:description> element can appear only as a child of <entry>
  """
  _tag = 'description'       # XML local name
  _namespace = DC_NAMESPACE  # Dublin Core terms namespace
class Format(_AtomFromString):
  """
  The <dc:format> element describes the physical properties of the volume.
  Currently, it indicates the number of pages in the book, but more
  information may be added to this field in the future.

  This element can appear only as a child of <entry>.
  """
  _tag = 'format'            # XML local name
  _namespace = DC_NAMESPACE  # Dublin Core terms namespace
class Identifier(_AtomFromString):
  """
  The <dc:identifier> element provides an unambiguous reference to a
  particular book.

  * Every <entry> contains at least one <dc:identifier> child.
  * The first identifier is always the unique string Book Search has assigned
    to the volume (such as s1gVAAAAYAAJ). This is the ID that appears in the
    book's URL in the Book Search GUI, as well as in the URL of that book's
    single item feed.
  * Many books contain additional <dc:identifier> elements. These provide
    alternate, external identifiers to the volume. Such identifiers may
    include the ISBNs, ISSNs, Library of Congress Control Numbers (LCCNs),
    and OCLC numbers; they are prepended with a corresponding namespace
    prefix (such as "ISBN:").
  * Any <dc:identifier> can be passed to the Dynamic Links, used to
    instantiate an Embedded Viewer, or even used to construct static links to
    Book Search.

  The <dc:identifier> element can appear only as a child of <entry>.
  """
  _tag = 'identifier'        # XML local name
  _namespace = DC_NAMESPACE  # Dublin Core terms namespace
class Publisher(_AtomFromString):
  """
  The <dc:publisher> element contains the name of the entity responsible for
  producing and distributing the volume (usually the specific edition of this
  book). Examples of a publisher include a person, an organization, or a
  service.

  This element can appear only as a child of <entry>. If there is more than
  one publisher, multiple <dc:publisher> elements may appear.
  """
  _tag = 'publisher'         # XML local name
  _namespace = DC_NAMESPACE  # Dublin Core terms namespace
class Subject(_AtomFromString):
  """
  The <dc:subject> element identifies the topic of the book. Usually this is
  a Library of Congress Subject Heading (LCSH) or Book Industry Standards
  and Communications Subject Heading (BISAC).

  The <dc:subject> element can appear only as a child of <entry>. There may
  be multiple <dc:subject> elements per entry.
  """
  _tag = 'subject'           # XML local name
  _namespace = DC_NAMESPACE  # Dublin Core terms namespace
class Title(_AtomFromString):
    """
    The <dc:title> element contains the title of a book as it was published. If
    a book has a subtitle, it appears as a second <dc:title> element in the book
    result's <entry>.
    """
    _tag = 'title'             # XML element name this class parses
    _namespace = DC_NAMESPACE  # Dublin Core namespace
class Viewability(_AtomFromString):
    """
    Google Book Search respects the user's local copyright restrictions. As a
    result, previews or full views of some books are not available in all
    locations. The <gbs:viewability> element indicates whether a book is fully
    viewable, can be previewed, or only has "about the book" information. These
    three "viewability modes" are the same ones returned by the Dynamic Links
    API.
    The <gbs:viewability> element can appear only as a child of <entry>.
    The value attribute will take the form of the following URIs to represent
    the relevant viewing capability:
    Full View: http://schemas.google.com/books/2008#view_all_pages
    Limited Preview: http://schemas.google.com/books/2008#view_partial
    Snippet View/No Preview: http://schemas.google.com/books/2008#view_no_pages
    Unknown view: http://schemas.google.com/books/2008#view_unknown
    """
    _tag = 'viewability'
    _namespace = BOOK_SEARCH_NAMESPACE
    # Expose the element's 'value' XML attribute as the .value attribute.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """value: the viewability URI carried by the element's value attribute."""
        self.value = value
        _AtomFromString.__init__(self, extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)
class Embeddability(_AtomFromString):
    """
    Many of the books found on Google Book Search can be embedded on third-party
    sites using the Embedded Viewer. The <gbs:embeddability> element indicates
    whether a particular book result is available for embedding. By definition,
    a book that cannot be previewed on Book Search cannot be embedded on third-
    party sites.
    The <gbs:embeddability> element can appear only as a child of <entry>.
    The value attribute will take on one of the following URIs:
    embeddable: http://schemas.google.com/books/2008#embeddable
    not embeddable: http://schemas.google.com/books/2008#not_embeddable
    """
    _tag = 'embeddability'
    _namespace = BOOK_SEARCH_NAMESPACE
    # Expose the element's 'value' XML attribute as the .value attribute.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """value: the embeddability URI carried by the element's value attribute."""
        self.value = value
        _AtomFromString.__init__(self, extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)
class Review(_AtomFromString):
    """
    When present, the <gbs:review> element contains a user-generated review for
    a given book. This element currently appears only in the user library and
    user annotation feeds, as a child of <entry>.

    type: text, html, xhtml
    xml:lang: id of the language, a guess, (always two letters?)
    """
    _tag = 'review'
    _namespace = BOOK_SEARCH_NAMESPACE
    # Map the 'type' attribute and the namespaced xml:lang attribute.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['type'] = 'type'
    _attributes['{http://www.w3.org/XML/1998/namespace}lang'] = 'lang'

    def __init__(self, type=None, lang=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """type: review content type; lang: xml:lang code; text: review body."""
        self.type = type
        self.lang = lang
        _AtomFromString.__init__(self, extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)
class Rating(_AtomFromString):
    """All attributes must take an integral string between 1 and 5.

    The min, max, and average attributes represent 'community' ratings. The
    value attribute is the user's (of the feed from which the item is fetched,
    not necessarily the authenticated user) rating of the book.
    """
    _tag = 'rating'
    _namespace = gdata.GDATA_NAMESPACE
    # All four rating figures arrive as XML attributes.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['min'] = 'min'
    _attributes['max'] = 'max'
    _attributes['average'] = 'average'
    _attributes['value'] = 'value'

    def __init__(self, min=None, max=None, average=None, value=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """min/max/average: community rating bounds; value: this user's rating."""
        self.min = min
        self.max = max
        self.average = average
        self.value = value
        _AtomFromString.__init__(self, extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)
class Book(_AtomFromString, gdata.GDataEntry):
    """
    Represents an <entry> from either a search, annotation, library, or single
    item feed. Note that dc_title attribute is the proper title of the volume,
    title is an atom element and may not represent the full title.
    """
    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    # Repeatable children parse into lists (the [i] wrapper) ...
    for i in (Creator, Identifier, Publisher, Subject,):
        _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, [i])
    # ... single-valued children parse into one instance.
    for i in (Date, Description, Format, Viewability, Embeddability,
              Review, Rating):  # Review, Rating maybe only in anno/lib entries
        _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, i)
    # there is an atom title as well, should we clobber that?
    del(i)
    # dc:title stored under 'dc_title' to avoid clashing with atom title.
    _children['{%s}%s' % (Title._namespace, Title._tag)] = ('dc_title', [Title])

    def to_dict(self):
        """Returns a dictionary of the book's available metadata. If the data
        cannot be discovered, it is not included as a key in the returned dict.
        The possible keys are: authors, embeddability, date, description,
        format, identifiers, publishers, rating, review, subjects, title, and
        viewability.

        Notes:
          * Plural keys will be lists
          * Singular keys will be strings
          * Title, despite usually being a list, joins the title and subtitle
            with a space as a single string.
          * embeddability and viewability only return the portion of the URI
            after #
          * identifiers is a list of tuples, where the first item of each tuple
            is the type of identifier and the second item is the identifying
            string. Note that while doing dict() on this tuple may be possible,
            some items may have multiple of the same identifier and converting
            to a dict may result in collisions/dropped data.
          * Rating returns only the user's rating. See Rating class for precise
            definition.
        """
        d = {}
        if self.GetAnnotationLink():
            d['annotation'] = self.GetAnnotationLink().href
        if self.creator:
            d['authors'] = [x.text for x in self.creator]
        if self.embeddability:
            d['embeddability'] = self.embeddability.value.split('#')[-1]
        if self.date:
            d['date'] = self.date.text
        if self.description:
            d['description'] = self.description.text
        if self.format:
            d['format'] = self.format.text
        if self.identifier:
            # First identifier is always Google's own volume ID (see Identifier).
            d['identifiers'] = [('google_id', self.identifier[0].text)]
            for x in self.identifier[1:]:
                l = x.text.split(':')  # should we lower the case of the ids?
                d['identifiers'].append((l[0], ':'.join(l[1:])))
        if self.GetInfoLink():
            d['info'] = self.GetInfoLink().href
        if self.GetPreviewLink():
            d['preview'] = self.GetPreviewLink().href
        if self.publisher:
            d['publishers'] = [x.text for x in self.publisher]
        if self.rating:
            d['rating'] = self.rating.value
        if self.review:
            d['review'] = self.review.text
        if self.subject:
            d['subjects'] = [x.text for x in self.subject]
        if self.GetThumbnailLink():
            d['thumbnail'] = self.GetThumbnailLink().href
        if self.dc_title:
            # Title and subtitle arrive as separate dc:title elements.
            d['title'] = ' '.join([x.text for x in self.dc_title])
        if self.viewability:
            d['viewability'] = self.viewability.value.split('#')[-1]
        return d

    def __init__(self, creator=None, date=None,
                 description=None, format=None, author=None, identifier=None,
                 publisher=None, subject=None, dc_title=None, viewability=None,
                 embeddability=None, review=None, rating=None, category=None,
                 content=None, contributor=None, atom_id=None, link=None,
                 published=None, rights=None, source=None, summary=None,
                 title=None, control=None, updated=None, text=None,
                 extension_elements=None, extension_attributes=None):
        # Book-specific (mostly Dublin Core / gbs:) children.
        self.creator = creator
        self.date = date
        self.description = description
        self.format = format
        self.identifier = identifier
        self.publisher = publisher
        self.subject = subject
        self.dc_title = dc_title or []
        self.viewability = viewability
        self.embeddability = embeddability
        self.review = review
        self.rating = rating
        # Remaining arguments are standard atom/gdata entry fields.
        gdata.GDataEntry.__init__(self, author=author, category=category,
            content=content, contributor=contributor, atom_id=atom_id,
            link=link, published=published, rights=rights, source=source,
            summary=summary, title=title, control=control, updated=updated,
            text=text, extension_elements=extension_elements,
            extension_attributes=extension_attributes)

    def GetThumbnailLink(self):
        """Returns the atom.Link object representing the thumbnail URI."""
        for i in self.link:
            if i.rel == THUMBNAIL_REL:
                return i

    def GetInfoLink(self):
        """
        Returns the atom.Link object representing the human-readable info URI.
        """
        for i in self.link:
            if i.rel == INFO_REL:
                return i

    def GetPreviewLink(self):
        """Returns the atom.Link object representing the preview URI."""
        for i in self.link:
            if i.rel == PREVIEW_REL:
                return i

    def GetAnnotationLink(self):
        """
        Returns the atom.Link object representing the Annotation URI.
        Note that the use of www.books in the href of this link seems to make
        this information useless. Using books.service.ANNOTATION_FEED and
        BOOK_SERVER to construct your URI seems to work better.
        """
        for i in self.link:
            if i.rel == ANNOTATION_REL:
                return i

    def set_rating(self, value):
        """Set user's rating. Must be an integral string between 1 and 5"""
        # NOTE(review): assert is stripped under python -O; a ValueError might
        # be more robust here, but callers may rely on AssertionError.
        assert (value in ('1', '2', '3', '4', '5'))
        if not isinstance(self.rating, Rating):
            self.rating = Rating()
        self.rating.value = value

    def set_review(self, text, type='text', lang='en'):
        """Set user's review text"""
        self.review = Review(text=text, type=type, lang=lang)

    def get_label(self):
        """Get users label for the item as a string"""
        for i in self.category:
            if i.scheme == LABEL_SCHEME:
                return i.term

    def set_label(self, term):
        """Clear pre-existing label for the item and set term as the label."""
        self.remove_label()
        self.category.append(atom.Category(term=term, scheme=LABEL_SCHEME))

    def remove_label(self):
        """Clear the user's label for the item"""
        # Iterate a reversed copy so deleting by index stays valid.
        ln = len(self.category)
        for i, j in enumerate(self.category[::-1]):
            if j.scheme == LABEL_SCHEME:
                del(self.category[ln - 1 - i])

    def clean_annotations(self):
        """Clear all annotations from an item. Useful for taking an item from
        another user's library/annotation feed and adding it to the
        authenticated user's library without adopting annotations."""
        self.remove_label()
        self.review = None
        self.rating = None

    def get_google_id(self):
        """Get Google's ID of the item."""
        # The atom id is a URI whose last path segment is the volume ID.
        return self.id.text.split('/')[-1]
class BookFeed(_AtomFromString, gdata.GDataFeed):
    """Represents a feed of entries from a search."""
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    # Each atom:entry in the feed parses into a Book instance (list-valued).
    _children['{%s}%s' % (Book._namespace, Book._tag)] = (Book._tag, [Book])
if __name__ == '__main__':
    # Run the executable examples in datamodels.txt as doctests.
    import doctest
    doctest.testfile('datamodels.txt')

View File

@ -0,0 +1,90 @@
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Book Search Data API"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import atom.data
import gdata.data
import gdata.dublincore.data
import gdata.opensearch.data
GBS_TEMPLATE = '{http://schemas.google.com/books/2008/}%s'
class CollectionEntry(gdata.data.GDEntry):
    """Describes an entry in a feed of collections."""
    # No extra children beyond the base GDEntry.
class CollectionFeed(gdata.data.BatchFeed):
    """Describes a Book Search collection feed."""
    entry = [CollectionEntry]  # repeatable atom:entry children
class Embeddability(atom.core.XmlElement):
    """Describes an embeddability."""
    _qname = GBS_TEMPLATE % 'embeddability'
    value = 'value'  # maps the XML 'value' attribute
class OpenAccess(atom.core.XmlElement):
    """Describes an open access."""
    _qname = GBS_TEMPLATE % 'openAccess'
    value = 'value'  # maps the XML 'value' attribute
class Review(atom.core.XmlElement):
    """User-provided review."""
    _qname = GBS_TEMPLATE % 'review'
    lang = 'lang'  # maps the XML 'lang' attribute
    type = 'type'  # maps the XML 'type' attribute
class Viewability(atom.core.XmlElement):
    """Describes a viewability."""
    _qname = GBS_TEMPLATE % 'viewability'
    value = 'value'  # maps the XML 'value' attribute
class VolumeEntry(gdata.data.GDEntry):
    """Describes an entry in a feed of Book Search volumes."""
    # Class attributes declare child-element parsing; a [Class] value marks
    # the element repeatable (parsed into a list).
    comments = gdata.data.Comments
    language = [gdata.dublincore.data.Language]
    open_access = OpenAccess
    format = [gdata.dublincore.data.Format]
    dc_title = [gdata.dublincore.data.Title]
    viewability = Viewability
    embeddability = Embeddability
    creator = [gdata.dublincore.data.Creator]
    rating = gdata.data.Rating
    description = [gdata.dublincore.data.Description]
    publisher = [gdata.dublincore.data.Publisher]
    date = [gdata.dublincore.data.Date]
    subject = [gdata.dublincore.data.Subject]
    identifier = [gdata.dublincore.data.Identifier]
    review = Review
class VolumeFeed(gdata.data.BatchFeed):
    """Describes a Book Search volume feed."""
    entry = [VolumeEntry]  # repeatable atom:entry children

View File

@ -0,0 +1,266 @@
#!/usr/bin/python
"""
Extend gdata.service.GDataService to support authenticated CRUD ops on
Books API
http://code.google.com/apis/books/docs/getting-started.html
http://code.google.com/apis/books/docs/gdata/developers_guide_protocol.html
TODO: (here and __init__)
* search based on label, review, or other annotations (possible?)
* edit (specifically, Put requests) seem to fail effect a change
Problems With API:
* Adding a book with a review to the library adds a note, not a review.
This does not get included in the returned item. You see this by
looking at My Library through the website.
* Editing a review never edits a review (unless it is freshly added, but
see above). More generally,
* a Put request with changed annotations (label/rating/review) does NOT
change the data. Note: Put requests only work on the href from
GetEditLink (as per the spec). Do not try to PUT to the annotate or
library feeds, this will cause a 400 Invalid URI Bad Request response.
Attempting to Post to one of the feeds with the updated annotations
does not update them. See the following for (hopefully) a follow up:
google.com/support/forum/p/booksearch-apis/thread?tid=27fd7f68de438fc8
* Attempts to workaround the edit problem continue to fail. For example,
removing the item, editing the data, readding the item, gives us only
our originally added data (annotations). This occurs even if we
completely shut python down, refetch the book from the public feed,
and re-add it. There is some kind of persistence going on that I
cannot change. This is likely due to the annotations being cached in
the annotation feed and the inability to edit (see Put, above)
* GetAnnotationLink has www.books.... as the server, but hitting www...
results in a bad URI error.
* Spec indicates there may be multiple labels, but there does not seem
to be a way to get the server to accept multiple labels, nor does the
web interface have an obvious way to have multiple labels. Multiple
labels are never returned.
"""
__author__ = "James Sams <sams.james@gmail.com>"
__copyright__ = "Apache License v2.0"
from shlex import split
import gdata.service
try:
import books
except ImportError:
import gdata.books as books
# Host and URI templates for the Book Search GData feeds.
BOOK_SERVER = "books.google.com"
GENERAL_FEED = "/books/feeds/volumes"  # public search feed
ITEM_FEED = "/books/feeds/volumes/"    # single-volume feed; append the volume ID
LIBRARY_FEED = "/books/feeds/users/%s/collections/library/volumes"  # %s = user id
ANNOTATION_FEED = "/books/feeds/users/%s/volumes"                   # %s = user id
PARTNER_FEED = "/books/feeds/p/%s/volumes"                          # %s = partner id
BOOK_SERVICE = "print"  # GData service code for Book Search
ACCOUNT_TYPE = "HOSTED_OR_GOOGLE"
class BookService(gdata.service.GDataService):
    """Client for the Google Book Search GData service.

    Wraps gdata.service.GDataService with helpers for querying the public
    volume feeds and for authenticated CRUD operations on the user's library
    and annotation feeds.
    """

    def __init__(self, email=None, password=None, source=None,
                 server=BOOK_SERVER, account_type=ACCOUNT_TYPE,
                 exception_handlers=tuple(), **kwargs):
        """source should be of form 'ProgramCompany - ProgramName - Version'"""
        # NOTE(review): account_type is accepted but not forwarded to
        # GDataService; confirm whether it should be passed through.
        gdata.service.GDataService.__init__(self, email=email,
            password=password, service=BOOK_SERVICE, source=source,
            server=server, **kwargs)
        self.exception_handlers = exception_handlers

    def search(self, q, start_index="1", max_results="10",
               min_viewability="none", feed=GENERAL_FEED,
               converter=books.BookFeed.FromString):
        """
        Query the Public search feed. q is either a search string or a
        gdata.service.Query instance with a query set.

        min_viewability must be "none", "partial", or "full".

        If you change the feed to a single item feed, note that you will
        probably need to change the converter to be Book.FromString
        """
        if not isinstance(q, gdata.service.Query):
            q = gdata.service.Query(text_query=q)
        if feed:
            q.feed = feed
        q['start-index'] = start_index
        q['max-results'] = max_results
        q['min-viewability'] = min_viewability
        return self.Get(uri=q.ToUri(), converter=converter)

    def search_by_keyword(self, q='', feed=GENERAL_FEED, start_index="1",
                          max_results="10", min_viewability="none", **kwargs):
        """
        Query the Public Search Feed by keyword. Non-keyword strings can be
        set in q. This is quite fragile. Is there a function somewhere in
        the Google library that will parse a query the same way that Google
        does?

        Legal Identifiers are listed below and correspond to their meaning
        at http://books.google.com/advanced_book_search:
            all_words
            exact_phrase
            at_least_one
            without_words
            title
            author
            publisher
            subject
            isbn
            issn
            lccn
            oclc

        seemingly unsupported:
            publication_date: a sequence of two, two tuples:
                ((min_month,min_year),(max_month,max_year))
                where month is one/two digit month, year is 4 digit, eg:
                (('1','2000'),('10','2003')). Lower bound is inclusive,
                upper bound is exclusive
        """
        for k, v in kwargs.items():
            if not v:
                continue
            k = k.lower()
            if k == 'all_words':
                q = "%s %s" % (q, v)
            elif k == 'exact_phrase':
                q = '%s "%s"' % (q, v.strip('"'))
            elif k == 'at_least_one':
                q = '%s %s' % (q, ' '.join(['OR "%s"' % x for x in split(v)]))
            elif k == 'without_words':
                q = '%s %s' % (q, ' '.join(['-"%s"' % x for x in split(v)]))
            elif k in ('author', 'title', 'publisher'):
                q = '%s %s' % (q, ' '.join(['in%s:"%s"' % (k, x)
                                            for x in split(v)]))
            elif k == 'subject':
                q = '%s %s' % (q, ' '.join(['%s:"%s"' % (k, x)
                                            for x in split(v)]))
            elif k == 'isbn':
                q = '%s ISBN%s' % (q, v)
            elif k == 'issn':
                q = '%s ISSN%s' % (q, v)
            elif k == 'lccn':
                # BUG FIX: 'lccn' is documented above as a legal identifier but
                # previously fell through to ValueError. Mirrors the
                # ISBN/ISSN/OCLC prefix form; TODO confirm the 'LCCN<number>'
                # query syntax against the advanced book search UI.
                q = '%s LCCN%s' % (q, v)
            elif k == 'oclc':
                q = '%s OCLC%s' % (q, v)
            else:
                raise ValueError("Unsupported search keyword")
        return self.search(q.strip(), start_index=start_index, feed=feed,
                           max_results=max_results,
                           min_viewability=min_viewability)

    def search_library(self, q, id='me', **kwargs):
        """Like search, but in a library feed. Default is the authenticated
        user's feed. Change by setting id."""
        if 'feed' in kwargs:
            raise ValueError("kwarg 'feed' conflicts with library_id")
        feed = LIBRARY_FEED % id
        return self.search(q, feed=feed, **kwargs)

    def search_library_by_keyword(self, id='me', **kwargs):
        """Hybrid of search_by_keyword and search_library
        """
        if 'feed' in kwargs:
            raise ValueError("kwarg 'feed' conflicts with library_id")
        feed = LIBRARY_FEED % id
        return self.search_by_keyword(feed=feed, **kwargs)

    def search_annotations(self, q, id='me', **kwargs):
        """Like search, but in an annotation feed. Default is the authenticated
        user's feed. Change by setting id."""
        if 'feed' in kwargs:
            raise ValueError("kwarg 'feed' conflicts with library_id")
        feed = ANNOTATION_FEED % id
        return self.search(q, feed=feed, **kwargs)

    def search_annotations_by_keyword(self, id='me', **kwargs):
        """Hybrid of search_by_keyword and search_annotations
        """
        if 'feed' in kwargs:
            raise ValueError("kwarg 'feed' conflicts with library_id")
        feed = ANNOTATION_FEED % id
        return self.search_by_keyword(feed=feed, **kwargs)

    def add_item_to_library(self, item):
        """Add the item, either an XML string or books.Book instance, to the
        user's library feed"""
        feed = LIBRARY_FEED % 'me'
        return self.Post(data=item, uri=feed, converter=books.Book.FromString)

    def remove_item_from_library(self, item):
        """
        Remove the item, a books.Book instance, from the authenticated user's
        library feed. Using an item retrieved from a public search will fail.
        """
        return self.Delete(item.GetEditLink().href)

    def add_annotation(self, item):
        """
        Add the item, either an XML string or books.Book instance, to the
        user's annotation feed.
        """
        # do not use GetAnnotationLink, results in 400 Bad URI due to www
        return self.Post(data=item, uri=ANNOTATION_FEED % 'me',
                         converter=books.Book.FromString)

    def edit_annotation(self, item):
        """
        Send an edited item, a books.Book instance, to the user's annotation
        feed. Note that whereas extra annotations in add_annotations, minus
        ratings which are immutable once set, are simply added to the item in
        the annotation feed, if an annotation has been removed from the item,
        sending an edit request will remove that annotation. This should not
        happen with add_annotation.
        """
        return self.Put(data=item, uri=item.GetEditLink().href,
                        converter=books.Book.FromString)

    def get_by_google_id(self, id):
        """Fetch a single volume by its Google volume ID."""
        return self.Get(ITEM_FEED + id, converter=books.Book.FromString)

    def get_library(self, id='me', feed=LIBRARY_FEED, start_index="1",
                    max_results="100", min_viewability="none",
                    converter=books.BookFeed.FromString):
        """
        Return a generator object that will return gbook.Book instances until
        the search feed no longer returns an item from the GetNextLink method.
        Thus max_results is not the maximum number of items that will be
        returned, but rather the number of items per page of searches. This has
        been set high to reduce the required number of network requests.
        """
        q = gdata.service.Query()
        q.feed = feed % id
        q['start-index'] = start_index
        q['max-results'] = max_results
        q['min-viewability'] = min_viewability
        x = self.Get(uri=q.ToUri(), converter=converter)
        # Yield every entry on the current page, then follow pagination links
        # until the feed is exhausted.
        while True:
            for entry in x.entry:
                yield entry
            next_link = x.GetNextLink()
            if next_link:  # hope the server preserves our preferences
                x = self.Get(uri=next_link.href, converter=converter)
            else:
                break

    def get_annotations(self, id='me', start_index="1", max_results="100",
                        min_viewability="none",
                        converter=books.BookFeed.FromString):
        """
        Like get_library, but for the annotation feed
        """
        return self.get_library(id=id, feed=ANNOTATION_FEED,
                                max_results=max_results,
                                min_viewability=min_viewability,
                                converter=converter)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,300 @@
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Calendar Data API"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import atom.data
import gdata.acl.data
import gdata.data
import gdata.geo.data
import gdata.opensearch.data
GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005/}%s'
class AccessLevelProperty(atom.core.XmlElement):
    """Describes how much a given user may do with an event or calendar"""
    _qname = GCAL_TEMPLATE % 'accesslevel'
    value = 'value'  # maps the XML 'value' attribute
class AllowGSync2Property(atom.core.XmlElement):
    """Whether the user is permitted to run Google Apps Sync"""
    _qname = GCAL_TEMPLATE % 'allowGSync2'
    value = 'value'  # maps the XML 'value' attribute
class AllowGSyncProperty(atom.core.XmlElement):
    """Whether the user is permitted to run Google Apps Sync"""
    _qname = GCAL_TEMPLATE % 'allowGSync'
    value = 'value'  # maps the XML 'value' attribute
class AnyoneCanAddSelfProperty(atom.core.XmlElement):
    """Whether anyone can add self as attendee"""
    _qname = GCAL_TEMPLATE % 'anyoneCanAddSelf'
    value = 'value'  # maps the XML 'value' attribute
class CalendarAclRole(gdata.acl.data.AclRole):
    """Describes the Calendar roles of an entry in the Calendar access control list"""
    _qname = gdata.acl.data.GACL_TEMPLATE % 'role'  # reuses the gAcl: qname
class CalendarCommentEntry(gdata.data.GDEntry):
    """Describes an entry in a feed of a Calendar event's comments"""
    # No extra children beyond the base GDEntry.
class CalendarCommentFeed(gdata.data.GDFeed):
    """Describes feed of a Calendar event's comments"""
    entry = [CalendarCommentEntry]  # repeatable atom:entry children
class CalendarComments(gdata.data.Comments):
    """Describes a container of a feed link for Calendar comment entries"""
    _qname = gdata.data.GD_TEMPLATE % 'comments'  # reuses the gd: qname
class CalendarExtendedProperty(gdata.data.ExtendedProperty):
    """Defines a value for the realm attribute that is used only in the calendar API"""
    _qname = gdata.data.GD_TEMPLATE % 'extendedProperty'  # reuses the gd: qname
class CalendarWhere(gdata.data.Where):
    """Extends the base Where class with Calendar extensions"""
    _qname = gdata.data.GD_TEMPLATE % 'where'  # reuses the gd: qname
class ColorProperty(atom.core.XmlElement):
    """Describes the color of a calendar"""
    _qname = GCAL_TEMPLATE % 'color'
    value = 'value'  # maps the XML 'value' attribute
class GuestsCanInviteOthersProperty(atom.core.XmlElement):
    """Whether guests can invite others to the event"""
    _qname = GCAL_TEMPLATE % 'guestsCanInviteOthers'
    value = 'value'  # maps the XML 'value' attribute
class GuestsCanModifyProperty(atom.core.XmlElement):
    """Whether guests can modify event"""
    _qname = GCAL_TEMPLATE % 'guestsCanModify'
    value = 'value'  # maps the XML 'value' attribute
class GuestsCanSeeGuestsProperty(atom.core.XmlElement):
    """Whether guests can see other attendees"""
    _qname = GCAL_TEMPLATE % 'guestsCanSeeGuests'
    value = 'value'  # maps the XML 'value' attribute
class HiddenProperty(atom.core.XmlElement):
    """Describes whether a calendar is hidden"""
    _qname = GCAL_TEMPLATE % 'hidden'
    value = 'value'  # maps the XML 'value' attribute
class IcalUIDProperty(atom.core.XmlElement):
    """Describes the UID in the ical export of the event"""
    _qname = GCAL_TEMPLATE % 'uid'
    value = 'value'  # maps the XML 'value' attribute
class OverrideNameProperty(atom.core.XmlElement):
    """Describes the override name property of a calendar"""
    _qname = GCAL_TEMPLATE % 'overridename'
    value = 'value'  # maps the XML 'value' attribute
class PrivateCopyProperty(atom.core.XmlElement):
    """Indicates whether this is a private copy of the event, changes to which
    should not be sent to other calendars"""
    _qname = GCAL_TEMPLATE % 'privateCopy'
    value = 'value'  # maps the XML 'value' attribute
class QuickAddProperty(atom.core.XmlElement):
    """Describes whether gd:content is for quick-add processing"""
    _qname = GCAL_TEMPLATE % 'quickadd'
    value = 'value'  # maps the XML 'value' attribute
class ResourceProperty(atom.core.XmlElement):
    """Describes whether gd:who is a resource such as a conference room"""
    _qname = GCAL_TEMPLATE % 'resource'
    value = 'value'  # maps the XML 'value' attribute
    id = 'id'        # maps the XML 'id' attribute
class EventWho(gdata.data.Who):
    """Extends the base Who class with Calendar extensions"""
    _qname = gdata.data.GD_TEMPLATE % 'who'  # reuses the gd: qname
    resource = ResourceProperty  # optional gCal:resource child
class SelectedProperty(atom.core.XmlElement):
    """Describes whether a calendar is selected"""
    _qname = GCAL_TEMPLATE % 'selected'
    value = 'value'  # maps the XML 'value' attribute
class SendAclNotificationsProperty(atom.core.XmlElement):
    """Describes whether to send ACL notifications to grantees"""
    _qname = GCAL_TEMPLATE % 'sendAclNotifications'
    value = 'value'  # maps the XML 'value' attribute
class CalendarAclEntry(gdata.data.GDEntry):
    """Describes an entry in a feed of a Calendar access control list (ACL)"""
    send_acl_notifications = SendAclNotificationsProperty  # optional child
class CalendarAclFeed(gdata.data.GDFeed):
    """Describes a Calendar access control list (ACL) feed"""
    entry = [CalendarAclEntry]  # repeatable atom:entry children
class SendEventNotificationsProperty(atom.core.XmlElement):
    """Describes whether to send event notifications to other participants of
    the event"""
    _qname = GCAL_TEMPLATE % 'sendEventNotifications'
    value = 'value'  # maps the XML 'value' attribute
class SequenceNumberProperty(atom.core.XmlElement):
    """Describes sequence number of an event"""
    _qname = GCAL_TEMPLATE % 'sequence'
    value = 'value'  # maps the XML 'value' attribute
class CalendarRecurrenceExceptionEntry(gdata.data.GDEntry):
    """Describes an entry used by a Calendar recurrence exception entry link"""
    uid = IcalUIDProperty          # iCal UID of the excepted occurrence
    sequence = SequenceNumberProperty
class CalendarRecurrenceException(gdata.data.RecurrenceException):
    """Describes an exception to a recurring Calendar event"""
    _qname = gdata.data.GD_TEMPLATE % 'recurrenceException'  # reuses the gd: qname
class SettingsProperty(atom.core.XmlElement):
    """User preference name-value pair"""
    _qname = GCAL_TEMPLATE % 'settingsProperty'
    name = 'name'    # maps the XML 'name' attribute
    value = 'value'  # maps the XML 'value' attribute
class SettingsEntry(gdata.data.GDEntry):
    """Describes a Calendar Settings property entry"""
    settings_property = SettingsProperty  # the single name/value pair child
class CalendarSettingsFeed(gdata.data.GDFeed):
    """Personal settings for Calendar application"""
    entry = [SettingsEntry]  # repeatable atom:entry children
class SuppressReplyNotificationsProperty(atom.core.XmlElement):
    """Lists notification methods to be suppressed for this reply"""
    _qname = GCAL_TEMPLATE % 'suppressReplyNotifications'
    methods = 'methods'  # maps the XML 'methods' attribute
class SyncEventProperty(atom.core.XmlElement):
    """Describes whether this is a sync scenario where the Ical UID and
    Sequence number are honored during inserts and updates"""
    _qname = GCAL_TEMPLATE % 'syncEvent'
    value = 'value'  # maps the XML 'value' attribute
class CalendarEventEntry(gdata.data.BatchEntry):
    """Describes a Calendar event entry"""
    # Class attributes declare child-element parsing; a [Class] value marks
    # the element repeatable (parsed into a list).
    quickadd = QuickAddProperty
    send_event_notifications = SendEventNotificationsProperty
    sync_event = SyncEventProperty
    anyone_can_add_self = AnyoneCanAddSelfProperty
    extended_property = [CalendarExtendedProperty]
    sequence = SequenceNumberProperty
    guests_can_invite_others = GuestsCanInviteOthersProperty
    guests_can_modify = GuestsCanModifyProperty
    guests_can_see_guests = GuestsCanSeeGuestsProperty
    georss_where = gdata.geo.data.GeoRssWhere
    private_copy = PrivateCopyProperty
    suppress_reply_notifications = SuppressReplyNotificationsProperty
    uid = IcalUIDProperty
class TimeZoneProperty(atom.core.XmlElement):
    """Describes the time zone of a calendar"""
    _qname = GCAL_TEMPLATE % 'timezone'
    value = 'value'  # maps the XML 'value' attribute
class TimesCleanedProperty(atom.core.XmlElement):
    """Describes how many times calendar was cleaned via Manage Calendars"""
    _qname = GCAL_TEMPLATE % 'timesCleaned'
    value = 'value'  # maps the XML 'value' attribute
class CalendarEntry(gdata.data.GDEntry):
    """Describes a Calendar entry in the feed of a user's calendars"""
    # Child-element parsing declarations; [CalendarWhere] is repeatable.
    timezone = TimeZoneProperty
    overridename = OverrideNameProperty
    hidden = HiddenProperty
    selected = SelectedProperty
    times_cleaned = TimesCleanedProperty
    color = ColorProperty
    where = [CalendarWhere]
    accesslevel = AccessLevelProperty
class CalendarEventFeed(gdata.data.BatchFeed):
    """Describes a Calendar event feed"""
    allow_g_sync2 = AllowGSync2Property
    timezone = TimeZoneProperty
    entry = [CalendarEventEntry]  # repeatable atom:entry children
    times_cleaned = TimesCleanedProperty
    allow_g_sync = AllowGSyncProperty
class CalendarFeed(gdata.data.GDFeed):
    """Describes a feed of Calendars"""
    entry = [CalendarEntry]  # repeatable atom:entry children
class WebContentGadgetPref(atom.core.XmlElement):
    """Describes a single web content gadget preference"""
    _qname = GCAL_TEMPLATE % 'webContentGadgetPref'
    name = 'name'    # maps the XML 'name' attribute
    value = 'value'  # maps the XML 'value' attribute
class WebContent(atom.core.XmlElement):
    """Describes a "web content" extension"""
    _qname = GCAL_TEMPLATE % 'webContent'
    height = 'height'  # maps the XML 'height' attribute
    width = 'width'    # maps the XML 'width' attribute
    web_content_gadget_pref = [WebContentGadgetPref]  # repeatable child
    url = 'url'          # maps the XML 'url' attribute
    display = 'display'  # maps the XML 'display' attribute
View File

@ -0,0 +1,595 @@
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CalendarService extends the GDataService to streamline Google Calendar operations.
CalendarService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'api.vli (Vivian Li)'
import urllib
import gdata
import atom.service
import gdata.service
import gdata.calendar
import atom
# Endpoint for batch operations on the default private calendar.
DEFAULT_BATCH_URL = ('http://www.google.com/calendar/feeds/default/private'
                     '/full/batch')
class Error(Exception):
    """Base exception for this calendar service module."""
    pass
class RequestError(Error):
    """Raised when a request to the Calendar service fails."""
    pass
class CalendarService(gdata.service.GDataService):
  """Client for the Google Calendar service.

  Thin wrapper around gdata.service.GDataService: each method maps one
  Calendar API operation (get/insert/update/delete on events, calendars,
  ACL rules and comments) onto an HTTP request against the corresponding
  feed URI, parsing the response with the matching converter.
  """

  def __init__(self, email=None, password=None, source=None,
               server='www.google.com', additional_headers=None, **kwargs):
    """Creates a client for the Google Calendar service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'www.google.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    # 'cl' is the ClientLogin service code for Google Calendar.
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='cl', source=source,
        server=server, additional_headers=additional_headers, **kwargs)

  def GetCalendarEventFeed(self, uri='/calendar/feeds/default/private/full'):
    # Fetch an event feed; defaults to the authenticated user's full feed.
    return self.Get(uri, converter=gdata.calendar.CalendarEventFeedFromString)

  def GetCalendarEventEntry(self, uri):
    return self.Get(uri, converter=gdata.calendar.CalendarEventEntryFromString)

  def GetCalendarListFeed(self, uri='/calendar/feeds/default/allcalendars/full'):
    return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString)

  def GetAllCalendarsFeed(self, uri='/calendar/feeds/default/allcalendars/full'):
    # Same default feed as GetCalendarListFeed; kept as a separate name.
    return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString)

  def GetOwnCalendarsFeed(self, uri='/calendar/feeds/default/owncalendars/full'):
    # Only calendars owned by the authenticated user.
    return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString)

  def GetCalendarListEntry(self, uri):
    return self.Get(uri, converter=gdata.calendar.CalendarListEntryFromString)

  def GetCalendarAclFeed(self, uri='/calendar/feeds/default/acl/full'):
    return self.Get(uri, converter=gdata.calendar.CalendarAclFeedFromString)

  def GetCalendarAclEntry(self, uri):
    return self.Get(uri, converter=gdata.calendar.CalendarAclEntryFromString)

  def GetCalendarEventCommentFeed(self, uri):
    return self.Get(uri, converter=gdata.calendar.CalendarEventCommentFeedFromString)

  def GetCalendarEventCommentEntry(self, uri):
    return self.Get(uri, converter=gdata.calendar.CalendarEventCommentEntryFromString)

  def Query(self, uri, converter=None):
    """Performs a query and returns a resulting feed or entry.

    Args:
      feed: string The feed which is to be queried

    Returns:
      On success, a GDataFeed or Entry depending on which is sent from the
        server.
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    if converter:
      result = self.Get(uri, converter=converter)
    else:
      result = self.Get(uri)
    return result

  def CalendarQuery(self, query):
    # Dispatch on the concrete query type so the server response is parsed
    # with the matching converter; unknown query types fall back to the
    # default GData parsing.
    if isinstance(query, CalendarEventQuery):
      return self.Query(query.ToUri(),
          converter=gdata.calendar.CalendarEventFeedFromString)
    elif isinstance(query, CalendarListQuery):
      return self.Query(query.ToUri(),
          converter=gdata.calendar.CalendarListFeedFromString)
    elif isinstance(query, CalendarEventCommentQuery):
      return self.Query(query.ToUri(),
          converter=gdata.calendar.CalendarEventCommentFeedFromString)
    else:
      return self.Query(query.ToUri())

  def InsertEvent(self, new_event, insert_uri, url_params=None,
      escape_params=True):
    """Adds an event to Google Calendar.

    Args:
      new_event: atom.Entry or subclass A new event which is to be added to
          Google Calendar.
      insert_uri: the URL to post new events to the feed
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful insert, an entry containing the event created
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    return self.Post(new_event, insert_uri, url_params=url_params,
                     escape_params=escape_params,
                     converter=gdata.calendar.CalendarEventEntryFromString)

  def InsertCalendarSubscription(self, calendar, url_params=None,
      escape_params=True):
    """Subscribes the authenticated user to the provided calendar.

    Args:
      calendar: The calendar to which the user should be subscribed.
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful insert, an entry containing the subscription created
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    insert_uri = '/calendar/feeds/default/allcalendars/full'
    return self.Post(calendar, insert_uri, url_params=url_params,
                     escape_params=escape_params,
                     converter=gdata.calendar.CalendarListEntryFromString)

  def InsertCalendar(self, new_calendar, url_params=None,
      escape_params=True):
    """Creates a new calendar.

    Args:
      new_calendar: The calendar to be created
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful insert, an entry containing the calendar created
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    # New calendars are created in the 'owncalendars' feed.
    insert_uri = '/calendar/feeds/default/owncalendars/full'
    response = self.Post(new_calendar, insert_uri, url_params=url_params,
                         escape_params=escape_params,
                         converter=gdata.calendar.CalendarListEntryFromString)
    return response

  def UpdateCalendar(self, calendar, url_params=None,
      escape_params=True):
    """Updates a calendar.

    Args:
      calendar: The calendar which should be updated
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful insert, an entry containing the calendar created
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    # The target URI comes from the entry's own edit link.
    update_uri = calendar.GetEditLink().href
    response = self.Put(data=calendar, uri=update_uri, url_params=url_params,
                        escape_params=escape_params,
                        converter=gdata.calendar.CalendarListEntryFromString)
    return response

  def InsertAclEntry(self, new_entry, insert_uri, url_params=None,
      escape_params=True):
    """Adds an ACL entry (rule) to Google Calendar.

    Args:
      new_entry: atom.Entry or subclass A new ACL entry which is to be added to
          Google Calendar.
      insert_uri: the URL to post new entries to the ACL feed
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful insert, an entry containing the ACL entry created
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    return self.Post(new_entry, insert_uri, url_params=url_params,
                     escape_params=escape_params,
                     converter=gdata.calendar.CalendarAclEntryFromString)

  def InsertEventComment(self, new_entry, insert_uri, url_params=None,
      escape_params=True):
    """Adds an entry to Google Calendar.

    Args:
      new_entry: atom.Entry or subclass A new entry which is to be added to
          Google Calendar.
      insert_uri: the URL to post new entrys to the feed
      url_params: dict (optional) Additional URL parameters to be included
          in the insertion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful insert, an entry containing the comment created
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    return self.Post(new_entry, insert_uri, url_params=url_params,
                     escape_params=escape_params,
                     converter=gdata.calendar.CalendarEventCommentEntryFromString)

  def _RemoveStandardUrlPrefix(self, url):
    # Strip 'http://<server>' from absolute URLs; the '- 1' keeps the
    # trailing '/' of the prefix so the result is a server-relative path.
    url_prefix = 'http://%s/' % self.server
    if url.startswith(url_prefix):
      return url[len(url_prefix) - 1:]
    return url

  def DeleteEvent(self, edit_uri, extra_headers=None,
      url_params=None, escape_params=True):
    """Removes an event with the specified ID from Google Calendar.

    Args:
      edit_uri: string The edit URL of the entry to be deleted. Example:
          'http://www.google.com/calendar/feeds/default/private/full/abx'
      url_params: dict (optional) Additional URL parameters to be included
          in the deletion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful delete, a httplib.HTTPResponse containing the server's
        response to the DELETE request.
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    edit_uri = self._RemoveStandardUrlPrefix(edit_uri)
    return self.Delete('%s' % edit_uri,
                       url_params=url_params, escape_params=escape_params)

  def DeleteAclEntry(self, edit_uri, extra_headers=None,
      url_params=None, escape_params=True):
    """Removes an ACL entry at the given edit_uri from Google Calendar.

    Args:
      edit_uri: string The edit URL of the entry to be deleted. Example:
          'http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'
      url_params: dict (optional) Additional URL parameters to be included
          in the deletion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful delete, a httplib.HTTPResponse containing the server's
        response to the DELETE request.
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    edit_uri = self._RemoveStandardUrlPrefix(edit_uri)
    return self.Delete('%s' % edit_uri,
                       url_params=url_params, escape_params=escape_params)

  def DeleteCalendarEntry(self, edit_uri, extra_headers=None,
      url_params=None, escape_params=True):
    """Removes a calendar entry at the given edit_uri from Google Calendar.

    Args:
      edit_uri: string The edit URL of the entry to be deleted. Example:
          'http://www.google.com/calendar/feeds/default/allcalendars/abcdef@group.calendar.google.com'
      url_params: dict (optional) Additional URL parameters to be included
          in the deletion request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful delete, True is returned
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    return self.Delete(edit_uri, url_params=url_params,
                       escape_params=escape_params)

  def UpdateEvent(self, edit_uri, updated_event, url_params=None,
      escape_params=True):
    """Updates an existing event.

    Args:
      edit_uri: string The edit link URI for the element being updated
      updated_event: string, atom.Entry, or subclass containing
          the Atom Entry which will replace the event which is
          stored at the edit_url
      url_params: dict (optional) Additional URL parameters to be included
          in the update request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful update, a httplib.HTTPResponse containing the server's
        response to the PUT request.
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    edit_uri = self._RemoveStandardUrlPrefix(edit_uri)
    return self.Put(updated_event, '%s' % edit_uri,
                    url_params=url_params,
                    escape_params=escape_params,
                    converter=gdata.calendar.CalendarEventEntryFromString)

  def UpdateAclEntry(self, edit_uri, updated_rule, url_params=None,
      escape_params=True):
    """Updates an existing ACL rule.

    Args:
      edit_uri: string The edit link URI for the element being updated
      updated_rule: string, atom.Entry, or subclass containing
          the Atom Entry which will replace the event which is
          stored at the edit_url
      url_params: dict (optional) Additional URL parameters to be included
          in the update request.
      escape_params: boolean (optional) If true, the url_parameters will be
          escaped before they are included in the request.

    Returns:
      On successful update, a httplib.HTTPResponse containing the server's
        response to the PUT request.
      On failure, a RequestError is raised of the form:
        {'status': HTTP status code from server,
        'reason': HTTP reason from the server,
        'body': HTTP body of the server's response}
    """
    edit_uri = self._RemoveStandardUrlPrefix(edit_uri)
    return self.Put(updated_rule, '%s' % edit_uri,
                    url_params=url_params,
                    escape_params=escape_params,
                    converter=gdata.calendar.CalendarAclEntryFromString)

  def ExecuteBatch(self, batch_feed, url,
      converter=gdata.calendar.CalendarEventFeedFromString):
    """Sends a batch request feed to the server.

    The batch request needs to be sent to the batch URL for a particular
    calendar. You can find the URL by calling GetBatchLink().href on the
    CalendarEventFeed.

    Args:
      batch_feed: gdata.calendar.CalendarEventFeed A feed containing batch
          request entries. Each entry contains the operation to be performed
          on the data contained in the entry. For example an entry with an
          operation type of insert will be used as if the individual entry
          had been inserted.
      url: str The batch URL for the Calendar to which these operations should
          be applied.
      converter: Function (optional) The function used to convert the server's
          response to an object. The default value is
          CalendarEventFeedFromString.

    Returns:
      The results of the batch request's execution on the server. If the
      default converter is used, this is stored in a CalendarEventFeed.
    """
    return self.Post(batch_feed, url, converter=converter)
class CalendarEventQuery(gdata.service.Query):
  """Query builder for Google Calendar event feeds.

  Builds the event feed URI from the user, visibility and projection
  components and exposes the Calendar-specific query parameters
  (start-min, start-max, orderby, sortorder, singleevents, futureevents,
  recurrence expansion bounds and ctz) as properties.
  """

  def __init__(self, user='default', visibility='private', projection='full',
               text_query=None, params=None, categories=None):
    gdata.service.Query.__init__(self,
        feed='http://www.google.com/calendar/feeds/%s/%s/%s' % (
            urllib.quote(user),
            urllib.quote(visibility),
            urllib.quote(projection)),
        text_query=text_query, params=params, categories=categories)

  def _GetStartMin(self):
    if 'start-min' in self.keys():
      return self['start-min']
    else:
      return None

  def _SetStartMin(self, val):
    self['start-min'] = val

  start_min = property(_GetStartMin, _SetStartMin,
      doc="""The start-min query parameter""")

  def _GetStartMax(self):
    if 'start-max' in self.keys():
      return self['start-max']
    else:
      return None

  def _SetStartMax(self, val):
    self['start-max'] = val

  start_max = property(_GetStartMax, _SetStartMax,
      doc="""The start-max query parameter""")

  def _GetOrderBy(self):
    if 'orderby' in self.keys():
      return self['orderby']
    else:
      return None

  def _SetOrderBy(self, val):
    """Sets the orderby parameter; raises Error for unsupported values.

    Bug fix: the original used "val is not 'lastmodified'", an object
    *identity* test that only appeared to work because CPython happens to
    intern short string literals; a value built at runtime would be
    rejected even when equal. Compare by value instead. The raise is also
    written in call form, valid in both Python 2 and 3.
    """
    if val not in ('lastmodified', 'starttime'):
      raise Error("Order By must be either 'lastmodified' or 'starttime'")
    self['orderby'] = val

  orderby = property(_GetOrderBy, _SetOrderBy,
      doc="""The orderby query parameter""")

  def _GetSortOrder(self):
    if 'sortorder' in self.keys():
      return self['sortorder']
    else:
      return None

  def _SetSortOrder(self, val):
    # Same identity-vs-equality fix as _SetOrderBy: membership test by
    # value rather than a chain of 'is not' comparisons.
    if val not in ('ascending', 'descending', 'a', 'd', 'ascend', 'descend'):
      raise Error('Sort order must be either ascending, ascend, '
                  'a or descending, descend, or d')
    self['sortorder'] = val

  sortorder = property(_GetSortOrder, _SetSortOrder,
      doc="""The sortorder query parameter""")

  def _GetSingleEvents(self):
    if 'singleevents' in self.keys():
      return self['singleevents']
    else:
      return None

  def _SetSingleEvents(self, val):
    self['singleevents'] = val

  singleevents = property(_GetSingleEvents, _SetSingleEvents,
      doc="""The singleevents query parameter""")

  def _GetFutureEvents(self):
    if 'futureevents' in self.keys():
      return self['futureevents']
    else:
      return None

  def _SetFutureEvents(self, val):
    self['futureevents'] = val

  futureevents = property(_GetFutureEvents, _SetFutureEvents,
      doc="""The futureevents query parameter""")

  def _GetRecurrenceExpansionStart(self):
    if 'recurrence-expansion-start' in self.keys():
      return self['recurrence-expansion-start']
    else:
      return None

  def _SetRecurrenceExpansionStart(self, val):
    self['recurrence-expansion-start'] = val

  recurrence_expansion_start = property(_GetRecurrenceExpansionStart,
      _SetRecurrenceExpansionStart,
      doc="""The recurrence-expansion-start query parameter""")

  def _GetRecurrenceExpansionEnd(self):
    if 'recurrence-expansion-end' in self.keys():
      return self['recurrence-expansion-end']
    else:
      return None

  def _SetRecurrenceExpansionEnd(self, val):
    self['recurrence-expansion-end'] = val

  recurrence_expansion_end = property(_GetRecurrenceExpansionEnd,
      _SetRecurrenceExpansionEnd,
      doc="""The recurrence-expansion-end query parameter""")

  def _SetTimezone(self, val):
    self['ctz'] = val

  def _GetTimezone(self):
    if 'ctz' in self.keys():
      return self['ctz']
    else:
      return None

  ctz = property(_GetTimezone, _SetTimezone,
      doc="""The ctz query parameter which sets report time on the server.""")
class CalendarListQuery(gdata.service.Query):
  """Queries the Google Calendar meta feed (the user's calendar list)."""

  def __init__(self, userId=None, text_query=None,
               params=None, categories=None):
    # A missing user id means the authenticated user's own list.
    target_user = 'default' if userId is None else userId
    feed_uri = 'http://www.google.com/calendar/feeds/' + target_user
    gdata.service.Query.__init__(self, feed=feed_uri, text_query=text_query,
                                 params=params, categories=categories)
class CalendarEventCommentQuery(gdata.service.Query):
  """Queries the Google Calendar event comments feed"""
  def __init__(self, feed=None):
    # The comments feed URI comes from the event entry; no extra
    # parameters beyond the base Query are needed.
    gdata.service.Query.__init__(self, feed=feed)

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,200 @@
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CalendarResourceClient simplifies Calendar Resources API calls.
CalendarResourceClient extends gdata.client.GDClient to ease interaction with
the Google Apps Calendar Resources API. These interactions include the ability
to create, retrieve, update, and delete calendar resources in a Google Apps
domain.
"""
__author__ = 'Vic Fryzel <vf@google.com>'
import gdata.calendar_resource.data
import gdata.client
import urllib
# Feed URI template. This must end with a /
# The strings in this template are eventually replaced with the API version
# and Google Apps domain name, respectively.
RESOURCE_FEED_TEMPLATE = '/a/feeds/calendar/resource/%s/%s/'
class CalendarResourceClient(gdata.client.GDClient):
  """Client extension for the Google Calendar Resource API service.

  Attributes:
    host: string The hostname for the Calendar Resource API service.
    api_version: string The version of the Calendar Resource API.
  """

  host = 'apps-apis.google.com'
  api_version = '2.0'
  auth_service = 'apps'
  auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
  ssl = True

  def __init__(self, domain, auth_token=None, **kwargs):
    """Constructs a new client for the Calendar Resource API.

    Args:
      domain: string The Google Apps domain with Calendar Resources.
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the calendar resource
          data.
      kwargs: The other parameters to pass to the gdata.client.GDClient
          constructor.
    """
    gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
    self.domain = domain

  def make_resource_feed_uri(self, resource_id=None, params=None):
    """Creates a resource feed URI for the Calendar Resource API.

    Using this client's Google Apps domain, create a feed URI for calendar
    resources in that domain. If a resource_id is provided, return a URI
    for that specific resource. If params are provided, append them as GET
    params.

    Args:
      resource_id: string (optional) The ID of the calendar resource for which
          to make a feed URI.
      params: dict (optional) key -> value params to append as GET vars to the
          URI. Example: params={'start': 'my-resource-id'}

    Returns:
      A string giving the URI for calendar resources for this client's Google
      Apps domain.
    """
    # RESOURCE_FEED_TEMPLATE ends with '/', so resource_id appends cleanly.
    uri = RESOURCE_FEED_TEMPLATE % (self.api_version, self.domain)
    if resource_id:
      uri += resource_id
    if params:
      uri += '?' + urllib.urlencode(params)
    return uri

  MakeResourceFeedUri = make_resource_feed_uri

  def get_resource_feed(self, uri=None, **kwargs):
    """Fetches a ResourceFeed of calendar resources at the given URI.

    Args:
      uri: string The URI of the feed to pull.
      kwargs: The other parameters to pass to gdata.client.GDClient.get_feed().

    Returns:
      A ResourceFeed object representing the feed at the given URI.
    """
    if uri is None:
      uri = self.MakeResourceFeedUri()
    return self.get_feed(
        uri,
        desired_class=gdata.calendar_resource.data.CalendarResourceFeed,
        **kwargs)

  GetResourceFeed = get_resource_feed

  def get_resource(self, uri=None, resource_id=None, **kwargs):
    """Fetches a single calendar resource by resource ID.

    Args:
      uri: string The base URI of the feed from which to fetch the resource.
      resource_id: string The string ID of the Resource to fetch.
      kwargs: The other parameters to pass to gdata.client.GDClient.get_entry().

    Returns:
      A Resource object representing the calendar resource with the given
      base URI and resource ID.
    """
    if uri is None:
      uri = self.MakeResourceFeedUri(resource_id)
    return self.get_entry(
        uri,
        desired_class=gdata.calendar_resource.data.CalendarResourceEntry,
        **kwargs)

  GetResource = get_resource

  def create_resource(self, resource_id, resource_common_name=None,
                      resource_description=None, resource_type=None, **kwargs):
    """Creates a calendar resource with the given properties.

    Args:
      resource_id: string The resource ID of the calendar resource.
      resource_common_name: string (optional) The common name of the resource.
      resource_description: string (optional) The description of the resource.
      resource_type: string (optional) The type of the resource.
      kwargs: The other parameters to pass to gdata.client.GDClient.post().

    Returns:
      gdata.calendar_resource.data.CalendarResourceEntry of the new resource.
    """
    new_resource = gdata.calendar_resource.data.CalendarResourceEntry(
        resource_id=resource_id,
        resource_common_name=resource_common_name,
        resource_description=resource_description,
        resource_type=resource_type)
    return self.post(new_resource, self.MakeResourceFeedUri(), **kwargs)

  CreateResource = create_resource

  def update_resource(self, resource_id, resource_common_name=None,
                      resource_description=None, resource_type=None, **kwargs):
    """Updates the calendar resource with the given resource ID.

    Args:
      resource_id: string The resource ID of the calendar resource to update.
      resource_common_name: string (optional) The common name to give the
          resource.
      resource_description: string (optional) The description to give the
          resource.
      resource_type: string (optional) The type to give the resource.
      kwargs: The other parameters to pass to gdata.client.GDClient.update().

    Returns:
      gdata.calendar_resource.data.CalendarResourceEntry of the updated
      resource.
    """
    new_resource = gdata.calendar_resource.data.CalendarResourceEntry(
        resource_id=resource_id,
        resource_common_name=resource_common_name,
        resource_description=resource_description,
        resource_type=resource_type)
    # NOTE(review): no URI is supplied here, and new_resource is built
    # without any links — this presumably relies on GDClient.update()
    # deriving the edit URI from the entry; confirm against the
    # gdata.client.GDClient.update() implementation.
    return self.update(
        new_resource,
        **kwargs)

  UpdateResource = update_resource

  def delete_resource(self, resource_id, **kwargs):
    """Deletes the calendar resource with the given resource ID.

    Args:
      resource_id: string The resource ID of the calendar resource to delete.
      kwargs: The other parameters to pass to gdata.client.GDClient.delete()

    Returns:
      An HTTP response object. See gdata.client.request().
    """
    return self.delete(self.MakeResourceFeedUri(resource_id), **kwargs)

  DeleteResource = delete_resource

View File

@ -0,0 +1,193 @@
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model for parsing and generating XML for the Calendar Resource API."""
__author__ = 'Vic Fryzel <vf@google.com>'
import atom.core
import atom.data
import gdata.apps
import gdata.apps_property
import gdata.data
# This is required to work around a naming conflict between the Google
# Spreadsheets API and Python's built-in property function
pyproperty = property
# The apps:property name of the resourceId property
RESOURCE_ID_NAME = 'resourceId'
# The apps:property name of the resourceCommonName property
RESOURCE_COMMON_NAME_NAME = 'resourceCommonName'
# The apps:property name of the resourceDescription property
RESOURCE_DESCRIPTION_NAME = 'resourceDescription'
# The apps:property name of the resourceType property
RESOURCE_TYPE_NAME = 'resourceType'
class CalendarResourceEntry(gdata.data.GDEntry):
  """Represents a Calendar Resource entry in object form."""

  # 'property' deliberately shadows the builtin: it is the gdata XML
  # binding for the repeated apps:property child elements. The builtin
  # remains available via the module-level 'pyproperty' alias.
  property = [gdata.apps_property.AppsProperty]

  def _GetProperty(self, name):
    """Get the apps:property value with the given name.

    Args:
      name: string Name of the apps:property value to get.

    Returns:
      The apps:property value with the given name, or None if the name was
      invalid.
    """
    for p in self.property:
      if p.name == name:
        return p.value
    return None

  def _SetProperty(self, name, value):
    """Set the apps:property value with the given name to the given value.

    Args:
      name: string Name of the apps:property value to set.
      value: string Value to give the apps:property value with the given name.
    """
    # Overwrite an existing property in place; append a new one otherwise.
    for i in range(len(self.property)):
      if self.property[i].name == name:
        self.property[i].value = value
        return
    self.property.append(gdata.apps_property.AppsProperty(name=name, value=value))

  def GetResourceId(self):
    """Get the resource ID of this Calendar Resource object.

    Returns:
      The resource ID of this Calendar Resource object as a string or None.
    """
    return self._GetProperty(RESOURCE_ID_NAME)

  def SetResourceId(self, value):
    """Set the resource ID of this Calendar Resource object.

    Args:
      value: string The new resource ID value to give this object.
    """
    self._SetProperty(RESOURCE_ID_NAME, value)

  resource_id = pyproperty(GetResourceId, SetResourceId)

  def GetResourceCommonName(self):
    """Get the common name of this Calendar Resource object.

    Returns:
      The common name of this Calendar Resource object as a string or None.
    """
    return self._GetProperty(RESOURCE_COMMON_NAME_NAME)

  def SetResourceCommonName(self, value):
    """Set the common name of this Calendar Resource object.

    Args:
      value: string The new common name value to give this object.
    """
    self._SetProperty(RESOURCE_COMMON_NAME_NAME, value)

  resource_common_name = pyproperty(
      GetResourceCommonName,
      SetResourceCommonName)

  def GetResourceDescription(self):
    """Get the description of this Calendar Resource object.

    Returns:
      The description of this Calendar Resource object as a string or None.
    """
    return self._GetProperty(RESOURCE_DESCRIPTION_NAME)

  def SetResourceDescription(self, value):
    """Set the description of this Calendar Resource object.

    Args:
      value: string The new description value to give this object.
    """
    self._SetProperty(RESOURCE_DESCRIPTION_NAME, value)

  resource_description = pyproperty(
      GetResourceDescription,
      SetResourceDescription)

  def GetResourceType(self):
    """Get the type of this Calendar Resource object.

    Returns:
      The type of this Calendar Resource object as a string or None.
    """
    return self._GetProperty(RESOURCE_TYPE_NAME)

  def SetResourceType(self, value):
    """Set the type value of this Calendar Resource object.

    Args:
      value: string The new type value to give this object.
    """
    self._SetProperty(RESOURCE_TYPE_NAME, value)

  resource_type = pyproperty(GetResourceType, SetResourceType)

  def __init__(self, resource_id=None, resource_common_name=None,
               resource_description=None, resource_type=None, *args, **kwargs):
    """Constructs a new CalendarResourceEntry object with the given arguments.

    Args:
      resource_id: string (optional) The resource ID to give this new object.
      resource_common_name: string (optional) The common name to give this new
          object.
      resource_description: string (optional) The description to give this new
          object.
      resource_type: string (optional) The type to give this new object.
      args: The other parameters to pass to gdata.entry.GDEntry constructor.
      kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
    """
    super(CalendarResourceEntry, self).__init__(*args, **kwargs)
    # Only set properties that were actually supplied, so the XML stays
    # free of empty apps:property elements.
    if resource_id:
      self.resource_id = resource_id
    if resource_common_name:
      self.resource_common_name = resource_common_name
    if resource_description:
      self.resource_description = resource_description
    if resource_type:
      self.resource_type = resource_type
class CalendarResourceFeed(gdata.data.GDFeed):
  """Represents a feed of CalendarResourceEntry objects."""
  # Override entry so that this feed knows how to type its list of entries.
  entry = [CalendarResourceEntry]

1126
python/gdata/client.py Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007 Benoit Chesneau <benoitc@metavers.net>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Contains extensions to Atom objects used by Google Codesearch"""
__author__ = 'Benoit Chesneau'
import atom
import gdata
CODESEARCH_NAMESPACE = 'http://schemas.google.com/codesearch/2006'
# Clark-notation qualified-name template ('{namespace}tag'). Bug fix: the
# original embedded 'http://shema.google.com/...' (misspelled, missing
# 'schemas'), which does not match CODESEARCH_NAMESPACE, so any qname built
# from the template could never match elements parsed under the real
# codesearch namespace.
CODESEARCH_TEMPLATE = '{http://schemas.google.com/codesearch/2006}%s'
class Match(atom.AtomBase):
  """ The Google Codesearch match element """
  _tag = 'match'
  _namespace = CODESEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # XML attribute 'lineNumber' is exposed as the Python attribute
  # 'line_number'; 'type' keeps its name (shadowing the builtin is the
  # gdata convention for attribute bindings).
  _attributes['lineNumber'] = 'line_number'
  _attributes['type'] = 'type'

  def __init__(self, line_number=None, type=None, extension_elements=None,
               extension_attributes=None, text=None):
    # The matched snippet is carried as the element text.
    self.text = text
    self.type = type
    self.line_number = line_number
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class File(atom.AtomBase):
  """ The Google Codesearch file element"""
  _tag = 'file'
  _namespace = CODESEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # The file's path/name is carried in the 'name' XML attribute.
  _attributes['name'] = 'name'

  def __init__(self, name=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.text = text
    self.name = name
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class Package(atom.AtomBase):
  """The <codesearch:package> element: the package containing a result.

  The 'name' and 'uri' XML attributes are exposed as same-named members.
  """

  _tag = 'package'
  _namespace = CODESEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'
  _attributes['uri'] = 'uri'

  def __init__(self, name=None, uri=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a Package.

    Args:
      name: str (optional) Name of the package.
      uri: str (optional) URI of the package.
      extension_elements: list (optional) Unanticipated XML child elements.
      extension_attributes: dict (optional) Unanticipated XML attributes.
      text: str (optional) Text content of the element.
    """
    self.name = name
    self.uri = uri
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class CodesearchEntry(gdata.GDataEntry):
  """A Google Code Search Atom entry (one matched file).

  Codesearch-namespace children declared in _children:
    file: File element naming the matched file.
    package: Package element for the containing package.
    match: list of Match elements, one per matching line.
  """

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()

  _children['{%s}file' % CODESEARCH_NAMESPACE] = ('file', File)
  _children['{%s}package' % CODESEARCH_NAMESPACE] = ('package', Package)
  _children['{%s}match' % CODESEARCH_NAMESPACE] = ('match', [Match])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               match=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a CodesearchEntry.

    Args:
      author, category, content, atom_id, link, published, title, updated:
          standard GDataEntry arguments, forwarded to the superclass.
      match: list (optional) of Match elements for this entry.
      extension_elements: list (optional) Unanticipated XML child elements.
      extension_attributes: dict (optional) Unanticipated XML attributes.
      text: str (optional) Text content of the entry element.
    """
    # Bug fix: the original called the superclass with text=None and never
    # forwarded extension_elements/extension_attributes, silently discarding
    # the caller-supplied values.
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes,
                              text=text)
    self.match = match or []
    # _children declares file/package members; initialize them so manually
    # constructed entries have the attributes before any XML is parsed in.
    self.file = None
    self.package = None
def CodesearchEntryFromString(xml_string):
  """Parses an XML string into a CodesearchEntry object.

  Args:
    xml_string: string The XML describing a Codesearch feed entry.

  Returns:
    A CodesearchEntry instance populated from the given XML.
  """
  entry = atom.CreateClassFromXMLString(CodesearchEntry, xml_string)
  return entry
class CodesearchFeed(gdata.GDataFeed):
  """An Atom feed whose entries are Google Code Search results."""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Override the default entry mapping so <atom:entry> children are parsed
  # as CodesearchEntry objects rather than plain GDataEntry objects.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CodesearchEntry])
def CodesearchFeedFromString(xml_string):
  """Converts an XML string into a CodesearchFeed object.

  Args:
    xml_string: string The XML describing a Codesearch feed.

  Returns:
    A CodesearchFeed object corresponding to the given XML.
  """
  # Doc fix: original docstring misspelled the return type "CodeseartchFeed".
  return atom.CreateClassFromXMLString(CodesearchFeed, xml_string)

Some files were not shown because too many files have changed in this diff Show More