Phased over from Z3950 to Google Books
33
python/gdata/Crypto/Cipher/__init__.py
Normal file
@@ -0,0 +1,33 @@
"""Secret-key encryption algorithms.

Secret-key encryption algorithms transform plaintext in some way that
is dependent on a key, producing ciphertext. This transformation can
easily be reversed, if (and, hopefully, only if) one knows the key.

The encryption modules here all support the interface described in PEP
272, "API for Block Encryption Algorithms".

If you don't know which algorithm to choose, use AES because it's
standard and has undergone a fair bit of examination.

Crypto.Cipher.AES         Advanced Encryption Standard
Crypto.Cipher.ARC2        Alleged RC2
Crypto.Cipher.ARC4        Alleged RC4
Crypto.Cipher.Blowfish
Crypto.Cipher.CAST
Crypto.Cipher.DES         The Data Encryption Standard.  Very commonly used
                          in the past, but today its 56-bit keys are too small.
Crypto.Cipher.DES3        Triple DES.
Crypto.Cipher.IDEA
Crypto.Cipher.RC5
Crypto.Cipher.XOR         The simple XOR cipher.
"""

__all__ = ['AES', 'ARC2', 'ARC4',
           'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5',
           'XOR'
           ]

__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $"
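Usage sketch (illustrative, not part of the committed files): the PEP 272 interface the docstring refers to, shown with the bundled AES module; the key and block values are made-up placeholders and assume the module imports cleanly on this platform.

from Crypto.Cipher import AES

key = '0123456789abcdef'              # AES-128 key: exactly 16 bytes
cipher = AES.new(key, AES.MODE_ECB)   # PEP 272: new(key, mode, [IV])
block = 'sixteen byte msg'            # ECB input must be a multiple of the 16-byte block size
ciphertext = cipher.encrypt(block)
assert AES.new(key, AES.MODE_ECB).decrypt(ciphertext) == block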
108
python/gdata/Crypto/Hash/HMAC.py
Normal file
@@ -0,0 +1,108 @@
"""HMAC (Keyed-Hashing for Message Authentication) Python module.

Implements the HMAC algorithm as described by RFC 2104.

This is just a copy of the Python 2.2 HMAC module, modified to work when
used on versions of Python before 2.2.
"""

__revision__ = "$Id: HMAC.py,v 1.5 2002/07/25 17:19:02 z3p Exp $"

import string

def _strxor(s1, s2):
    """Utility method. XOR the two strings s1 and s2 (must have same length).
    """
    return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))

# The size of the digests returned by HMAC depends on the underlying
# hashing module used.
digest_size = None

class HMAC:
    """RFC2104 HMAC class.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.

        key: key for the keyed hash object.
        msg: Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. Defaults to the md5 module.
        """
        if digestmod == None:
            import md5
            digestmod = md5

        self.digestmod = digestmod
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        try:
            self.digest_size = digestmod.digest_size
        except AttributeError:
            self.digest_size = len(self.outer.digest())

        blocksize = 64
        ipad = "\x36" * blocksize
        opad = "\x5C" * blocksize

        if len(key) > blocksize:
            key = digestmod.new(key).digest()

        key = key + chr(0) * (blocksize - len(key))
        self.outer.update(_strxor(key, opad))
        self.inner.update(_strxor(key, ipad))
        if (msg):
            self.update(msg)

##    def clear(self):
##        raise NotImplementedError, "clear() method not available in HMAC."

    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        other = HMAC("")
        other.digestmod = self.digestmod
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        return "".join([string.zfill(hex(ord(x))[2:], 2)
                        for x in tuple(self.digest())])

def new(key, msg = None, digestmod = None):
    """Create a new hashing object and return it.

    key: The starting key for the hash.
    msg: if available, will immediately be hashed into the object's starting
    state.

    You can now feed arbitrary strings into the object using its update()
    method, and can ask for the hash value at any time by calling its digest()
    method.
    """
    return HMAC(key, msg, digestmod)
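Usage sketch (illustrative, not part of the committed files): how the module above is driven through new(), assuming the bundled SHA wrapper is importable; the key and message strings are placeholders.

from Crypto.Hash import HMAC, SHA

h = HMAC.new('secret-key', 'message part one', digestmod=SHA)
h.update(' and part two')
print h.hexdigest()    # 40 hex digits, since SHA-1 digests are 20 bytes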
13
python/gdata/Crypto/Hash/MD5.py
Normal file
@@ -0,0 +1,13 @@

# Just use the MD5 module from the Python standard library

__revision__ = "$Id: MD5.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $"

from md5 import *

import md5
if hasattr(md5, 'digestsize'):
    digest_size = digestsize
    del digestsize
del md5
11
python/gdata/Crypto/Hash/SHA.py
Normal file
@@ -0,0 +1,11 @@

# Just use the SHA module from the Python standard library

__revision__ = "$Id: SHA.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $"

from sha import *
import sha
if hasattr(sha, 'digestsize'):
    digest_size = digestsize
    del digestsize
del sha
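Usage sketch (illustrative, not part of the committed files): MD5.py and SHA.py above only re-export the standard-library modules and normalise the digest_size attribute, so they behave as PEP 247 hash objects.

import sha
from Crypto.Hash import SHA

assert SHA.new('abc').hexdigest() == sha.new('abc').hexdigest()
print SHA.digest_size    # 20 for SHA-1 (assuming the interpreter exposes digestsize or digest_size)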
24
python/gdata/Crypto/Hash/__init__.py
Normal file
@@ -0,0 +1,24 @@
"""Hashing algorithms

Hash functions take arbitrary strings as input, and produce an output
of fixed size that is dependent on the input; it should never be
possible to derive the input data given only the hash function's
output. Hash functions can be used simply as a checksum, or, in
association with a public-key algorithm, can be used to implement
digital signatures.

The hashing modules here all support the interface described in PEP
247, "API for Cryptographic Hash Functions".

Submodules:
Crypto.Hash.HMAC          RFC 2104: Keyed-Hashing for Message Authentication
Crypto.Hash.MD2
Crypto.Hash.MD4
Crypto.Hash.MD5
Crypto.Hash.RIPEMD
Crypto.Hash.SHA
"""

__all__ = ['HMAC', 'MD2', 'MD4', 'MD5', 'RIPEMD', 'SHA', 'SHA256']
__revision__ = "$Id: __init__.py,v 1.6 2003/12/19 14:24:25 akuchling Exp $"
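Usage sketch (illustrative, not part of the committed files): every module listed above follows the PEP 247 calling pattern; a brief example of incremental hashing with copy().

from Crypto.Hash import SHA

h = SHA.new()
h.update('chunk one ')
snapshot = h.copy()            # independent copy; later updates to h don't affect it
h.update('chunk two')
print h.hexdigest(), snapshot.hexdigest()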
295
python/gdata/Crypto/Protocol/AllOrNothing.py
Normal file
@@ -0,0 +1,295 @@
|
||||
"""This file implements all-or-nothing package transformations.
|
||||
|
||||
An all-or-nothing package transformation is one in which some text is
|
||||
transformed into message blocks, such that all blocks must be obtained before
|
||||
the reverse transformation can be applied. Thus, if any blocks are corrupted
|
||||
or lost, the original message cannot be reproduced.
|
||||
|
||||
An all-or-nothing package transformation is not encryption, although a block
|
||||
cipher algorithm is used. The encryption key is randomly generated and is
|
||||
extractable from the message blocks.
|
||||
|
||||
This class implements the All-Or-Nothing package transformation algorithm
|
||||
described in:
|
||||
|
||||
Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform"
|
||||
http://theory.lcs.mit.edu/~rivest/fusion.pdf
|
||||
|
||||
"""
|
||||
|
||||
__revision__ = "$Id: AllOrNothing.py,v 1.8 2003/02/28 15:23:20 akuchling Exp $"
|
||||
|
||||
import operator
|
||||
import string
|
||||
from Crypto.Util.number import bytes_to_long, long_to_bytes
|
||||
|
||||
|
||||
|
||||
class AllOrNothing:
|
||||
"""Class implementing the All-or-Nothing package transform.
|
||||
|
||||
Methods for subclassing:
|
||||
|
||||
_inventkey(key_size):
|
||||
Returns a randomly generated key. Subclasses can use this to
|
||||
implement better random key generating algorithms. The default
|
||||
algorithm is probably not very cryptographically secure.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, ciphermodule, mode=None, IV=None):
|
||||
"""AllOrNothing(ciphermodule, mode=None, IV=None)
|
||||
|
||||
ciphermodule is a module implementing the cipher algorithm to
|
||||
use. It must provide the PEP272 interface.
|
||||
|
||||
Note that the encryption key is randomly generated
|
||||
automatically when needed. Optional arguments mode and IV are
|
||||
passed directly through to the ciphermodule.new() method; they
|
||||
are the feedback mode and initialization vector to use. All
|
||||
three arguments must be the same for the object used to create
|
||||
the digest, and to undigest'ify the message blocks.
|
||||
"""
|
||||
|
||||
self.__ciphermodule = ciphermodule
|
||||
self.__mode = mode
|
||||
self.__IV = IV
|
||||
self.__key_size = ciphermodule.key_size
|
||||
if self.__key_size == 0:
|
||||
self.__key_size = 16
|
||||
|
||||
__K0digit = chr(0x69)
|
||||
|
||||
def digest(self, text):
|
||||
"""digest(text:string) : [string]
|
||||
|
||||
Perform the All-or-Nothing package transform on the given
|
||||
string. Output is a list of message blocks describing the
|
||||
transformed text, where each block is a string of bit length equal
|
||||
to the ciphermodule's block_size.
|
||||
"""
|
||||
|
||||
# generate a random session key and K0, the key used to encrypt the
|
||||
# hash blocks. Rivest calls this a fixed, publically-known encryption
|
||||
# key, but says nothing about the security implications of this key or
|
||||
# how to choose it.
|
||||
key = self._inventkey(self.__key_size)
|
||||
K0 = self.__K0digit * self.__key_size
|
||||
|
||||
# we need two cipher objects here, one that is used to encrypt the
|
||||
# message blocks and one that is used to encrypt the hashes. The
|
||||
# former uses the randomly generated key, while the latter uses the
|
||||
# well-known key.
|
||||
mcipher = self.__newcipher(key)
|
||||
hcipher = self.__newcipher(K0)
|
||||
|
||||
# Pad the text so that its length is a multiple of the cipher's
|
||||
# block_size. Pad with trailing spaces, which will be eliminated in
|
||||
# the undigest() step.
|
||||
block_size = self.__ciphermodule.block_size
|
||||
padbytes = block_size - (len(text) % block_size)
|
||||
text = text + ' ' * padbytes
|
||||
|
||||
# Run through the algorithm:
|
||||
# s: number of message blocks (size of text / block_size)
|
||||
# input sequence: m1, m2, ... ms
|
||||
# random key K' (`key' in the code)
|
||||
# Compute output sequence: m'1, m'2, ... m's' for s' = s + 1
|
||||
# Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s
|
||||
# Let m's' = K' ^ h1 ^ h2 ^ ... hs
|
||||
# where hi = E(K0, m'i ^ i) for i = 1, 2, ... s
|
||||
#
|
||||
# The one complication I add is that the last message block is hard
|
||||
# coded to the number of padbytes added, so that these can be stripped
|
||||
# during the undigest() step
|
||||
s = len(text) / block_size
|
||||
blocks = []
|
||||
hashes = []
|
||||
for i in range(1, s+1):
|
||||
start = (i-1) * block_size
|
||||
end = start + block_size
|
||||
mi = text[start:end]
|
||||
assert len(mi) == block_size
|
||||
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
|
||||
mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock)
|
||||
blocks.append(mticki)
|
||||
# calculate the hash block for this block
|
||||
hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
|
||||
hashes.append(bytes_to_long(hi))
|
||||
|
||||
# Add the padbytes length as a message block
|
||||
i = i + 1
|
||||
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
|
||||
mticki = padbytes ^ bytes_to_long(cipherblock)
|
||||
blocks.append(mticki)
|
||||
|
||||
# calculate this block's hash
|
||||
hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
|
||||
hashes.append(bytes_to_long(hi))
|
||||
|
||||
# Now calculate the last message block of the sequence 1..s'. This
|
||||
# will contain the random session key XOR'd with all the hash blocks,
|
||||
# so that for undigest(), once all the hash blocks are calculated, the
|
||||
# session key can be trivially extracted. Calculating all the hash
|
||||
# blocks requires that all the message blocks be received, thus the
|
||||
# All-or-Nothing algorithm succeeds.
|
||||
mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes)
|
||||
blocks.append(mtick_stick)
|
||||
|
||||
# we convert the blocks to strings since in Python, byte sequences are
|
||||
# always represented as strings. This is more consistent with the
|
||||
# model that encryption and hash algorithms always operate on strings.
|
||||
return map(long_to_bytes, blocks)
|
||||
|
||||
|
||||
def undigest(self, blocks):
|
||||
"""undigest(blocks : [string]) : string
|
||||
|
||||
Perform the reverse package transformation on a list of message
|
||||
blocks. Note that the ciphermodule used for both transformations
|
||||
must be the same. blocks is a list of strings of bit length
|
||||
equal to the ciphermodule's block_size.
|
||||
"""
|
||||
|
||||
# better have at least 2 blocks, for the padbytes package and the hash
|
||||
# block accumulator
|
||||
if len(blocks) < 2:
|
||||
raise ValueError, "List must be at least length 2."
|
||||
|
||||
# blocks is a list of strings. We need to deal with them as long
|
||||
# integers
|
||||
blocks = map(bytes_to_long, blocks)
|
||||
|
||||
# Calculate the well-known key, to which the hash blocks are
|
||||
# encrypted, and create the hash cipher.
|
||||
K0 = self.__K0digit * self.__key_size
|
||||
hcipher = self.__newcipher(K0)
|
||||
|
||||
# Since we have all the blocks (or this method would have been called
|
||||
# prematurely), we can calcualte all the hash blocks.
|
||||
hashes = []
|
||||
for i in range(1, len(blocks)):
|
||||
mticki = blocks[i-1] ^ i
|
||||
hi = hcipher.encrypt(long_to_bytes(mticki))
|
||||
hashes.append(bytes_to_long(hi))
|
||||
|
||||
# now we can calculate K' (key). remember the last block contains
|
||||
# m's' which we don't include here
|
||||
key = blocks[-1] ^ reduce(operator.xor, hashes)
|
||||
|
||||
# and now we can create the cipher object
|
||||
mcipher = self.__newcipher(long_to_bytes(key))
|
||||
block_size = self.__ciphermodule.block_size
|
||||
|
||||
# And we can now decode the original message blocks
|
||||
parts = []
|
||||
for i in range(1, len(blocks)):
|
||||
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
|
||||
mi = blocks[i-1] ^ bytes_to_long(cipherblock)
|
||||
parts.append(mi)
|
||||
|
||||
# The last message block contains the number of pad bytes appended to
|
||||
# the original text string, such that its length was an even multiple
|
||||
# of the cipher's block_size. This number should be small enough that
|
||||
# the conversion from long integer to integer should never overflow
|
||||
padbytes = int(parts[-1])
|
||||
text = string.join(map(long_to_bytes, parts[:-1]), '')
|
||||
return text[:-padbytes]
|
||||
|
||||
def _inventkey(self, key_size):
|
||||
# TBD: Not a very secure algorithm. Eventually, I'd like to use JHy's
|
||||
# kernelrand module
|
||||
import time
|
||||
from Crypto.Util import randpool
|
||||
# TBD: key_size * 2 to work around possible bug in RandomPool?
|
||||
pool = randpool.RandomPool(key_size * 2)
|
||||
while key_size > pool.entropy:
|
||||
pool.add_event()
|
||||
|
||||
# we now have enough entropy in the pool to get a key_size'd key
|
||||
return pool.get_bytes(key_size)
|
||||
|
||||
def __newcipher(self, key):
|
||||
if self.__mode is None and self.__IV is None:
|
||||
return self.__ciphermodule.new(key)
|
||||
elif self.__IV is None:
|
||||
return self.__ciphermodule.new(key, self.__mode)
|
||||
else:
|
||||
return self.__ciphermodule.new(key, self.__mode, self.__IV)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import sys
|
||||
import getopt
|
||||
import base64
|
||||
|
||||
usagemsg = '''\
|
||||
Test module usage: %(program)s [-c cipher] [-l] [-h]
|
||||
|
||||
Where:
|
||||
--cipher module
|
||||
-c module
|
||||
Cipher module to use. Default: %(ciphermodule)s
|
||||
|
||||
--aslong
|
||||
-l
|
||||
Print the encoded message blocks as long integers instead of base64
|
||||
encoded strings
|
||||
|
||||
--help
|
||||
-h
|
||||
Print this help message
|
||||
'''
|
||||
|
||||
ciphermodule = 'AES'
|
||||
aslong = 0
|
||||
|
||||
def usage(code, msg=None):
|
||||
if msg:
|
||||
print msg
|
||||
print usagemsg % {'program': sys.argv[0],
|
||||
'ciphermodule': ciphermodule}
|
||||
sys.exit(code)
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:],
|
||||
'c:l', ['cipher=', 'aslong'])
|
||||
except getopt.error, msg:
|
||||
usage(1, msg)
|
||||
|
||||
if args:
|
||||
usage(1, 'Too many arguments')
|
||||
|
||||
for opt, arg in opts:
|
||||
if opt in ('-h', '--help'):
|
||||
usage(0)
|
||||
elif opt in ('-c', '--cipher'):
|
||||
ciphermodule = arg
|
||||
elif opt in ('-l', '--aslong'):
|
||||
aslong = 1
|
||||
|
||||
# ugly hack to force __import__ to give us the end-path module
|
||||
module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new'])
|
||||
|
||||
a = AllOrNothing(module)
|
||||
print 'Original text:\n=========='
|
||||
print __doc__
|
||||
print '=========='
|
||||
msgblocks = a.digest(__doc__)
|
||||
print 'message blocks:'
|
||||
for i, blk in map(None, range(len(msgblocks)), msgblocks):
|
||||
# base64 adds a trailing newline
|
||||
print ' %3d' % i,
|
||||
if aslong:
|
||||
print bytes_to_long(blk)
|
||||
else:
|
||||
print base64.encodestring(blk)[:-1]
|
||||
#
|
||||
# get a new undigest-only object so there's no leakage
|
||||
b = AllOrNothing(module)
|
||||
text = b.undigest(msgblocks)
|
||||
if text == __doc__:
|
||||
print 'They match!'
|
||||
else:
|
||||
print 'They differ!'
|
||||
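Usage sketch (illustrative, not part of the committed files): the same round trip the __main__ block above performs, written against the documented digest()/undigest() API; AES is just one possible ciphermodule and the message text is a placeholder.

from Crypto.Cipher import AES
from Crypto.Protocol.AllOrNothing import AllOrNothing

a = AllOrNothing(AES)
blocks = a.digest('attack at dawn')   # list of block_size-byte strings; all are needed
b = AllOrNothing(AES)                 # a fresh object can reverse the transform
assert b.undigest(blocks) == 'attack at dawn'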
229
python/gdata/Crypto/Protocol/Chaffing.py
Normal file
@@ -0,0 +1,229 @@
|
||||
"""This file implements the chaffing algorithm.
|
||||
|
||||
Winnowing and chaffing is a technique for enhancing privacy without requiring
|
||||
strong encryption. In short, the technique takes a set of authenticated
|
||||
message blocks (the wheat) and adds a number of chaff blocks which have
|
||||
randomly chosen data and MAC fields. This means that to an adversary, the
|
||||
chaff blocks look as valid as the wheat blocks, and so the authentication
|
||||
would have to be performed on every block. By tailoring the number of chaff
|
||||
blocks added to the message, the sender can make breaking the message
|
||||
computationally infeasible. There are many other interesting properties of
|
||||
the winnow/chaff technique.
|
||||
|
||||
For example, say Alice is sending a message to Bob. She packetizes the
|
||||
message and performs an all-or-nothing transformation on the packets. Then
|
||||
she authenticates each packet with a message authentication code (MAC). The
|
||||
MAC is a hash of the data packet, and there is a secret key which she must
|
||||
share with Bob (key distribution is an exercise left to the reader). She then
|
||||
adds a serial number to each packet, and sends the packets to Bob.
|
||||
|
||||
Bob receives the packets, and using the shared secret authentication key,
|
||||
authenticates the MACs for each packet. Those packets that have bad MACs are
|
||||
simply discarded. The remainder are sorted by serial number, and passed
|
||||
through the reverse all-or-nothing transform. The transform means that an
|
||||
eavesdropper (say Eve) must acquire all the packets before any of the data can
|
||||
be read. If even one packet is missing, the data is useless.
|
||||
|
||||
There's one twist: by adding chaff packets, Alice and Bob can make Eve's job
|
||||
much harder, since Eve now has to break the shared secret key, or try every
|
||||
combination of wheat and chaff packet to read any of the message. The cool
|
||||
thing is that Bob doesn't need to add any additional code; the chaff packets
|
||||
are already filtered out because their MACs don't match (in all likelihood --
|
||||
since the data and MACs for the chaff packets are randomly chosen it is
|
||||
possible, but very unlikely that a chaff MAC will match the chaff data). And
|
||||
Alice need not even be the party adding the chaff! She could be completely
|
||||
unaware that a third party, say Charles, is adding chaff packets to her
|
||||
messages as they are transmitted.
|
||||
|
||||
For more information on winnowing and chaffing see this paper:
|
||||
|
||||
Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption"
|
||||
http://theory.lcs.mit.edu/~rivest/chaffing.txt
|
||||
|
||||
"""
|
||||
|
||||
__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $"
|
||||
|
||||
from Crypto.Util.number import bytes_to_long
|
||||
|
||||
class Chaff:
|
||||
"""Class implementing the chaff adding algorithm.
|
||||
|
||||
Methods for subclasses:
|
||||
|
||||
_randnum(size):
|
||||
Returns a randomly generated number with a byte-length equal
|
||||
to size. Subclasses can use this to implement better random
|
||||
data and MAC generating algorithms. The default algorithm is
|
||||
probably not very cryptographically secure. It is most
|
||||
important that the chaff data does not contain any patterns
|
||||
that can be used to discern it from wheat data without running
|
||||
the MAC.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, factor=1.0, blocksper=1):
|
||||
"""Chaff(factor:float, blocksper:int)
|
||||
|
||||
factor is the number of message blocks to add chaff to,
|
||||
expressed as a percentage between 0.0 and 1.0. blocksper is
|
||||
the number of chaff blocks to include for each block being
|
||||
chaffed. Thus the defaults add one chaff block to every
|
||||
message block. By changing the defaults, you can adjust how
|
||||
computationally difficult it could be for an adversary to
|
||||
brute-force crack the message. The difficulty is expressed
|
||||
as:
|
||||
|
||||
pow(blocksper, int(factor * number-of-blocks))
|
||||
|
||||
For ease of implementation, when factor < 1.0, only the first
|
||||
int(factor*number-of-blocks) message blocks are chaffed.
|
||||
"""
|
||||
|
||||
if not (0.0<=factor<=1.0):
|
||||
raise ValueError, "'factor' must be between 0.0 and 1.0"
|
||||
if blocksper < 0:
|
||||
raise ValueError, "'blocksper' must be zero or more"
|
||||
|
||||
self.__factor = factor
|
||||
self.__blocksper = blocksper
|
||||
|
||||
|
||||
def chaff(self, blocks):
|
||||
"""chaff( [(serial-number:int, data:string, MAC:string)] )
|
||||
: [(int, string, string)]
|
||||
|
||||
Add chaff to message blocks. blocks is a list of 3-tuples of the
|
||||
form (serial-number, data, MAC).
|
||||
|
||||
Chaff is created by choosing a random number of the same
|
||||
byte-length as data, and another random number of the same
|
||||
byte-length as MAC. The message block's serial number is
|
||||
placed on the chaff block and all the packet's chaff blocks
|
||||
are randomly interspersed with the single wheat block. This
|
||||
method then returns a list of 3-tuples of the same form.
|
||||
Chaffed blocks will contain multiple instances of 3-tuples
|
||||
with the same serial number, but the only way to figure out
|
||||
which blocks are wheat and which are chaff is to perform the
|
||||
MAC hash and compare values.
|
||||
"""
|
||||
|
||||
chaffedblocks = []
|
||||
|
||||
# count is the number of blocks to add chaff to. blocksper is the
|
||||
# number of chaff blocks to add per message block that is being
|
||||
# chaffed.
|
||||
count = len(blocks) * self.__factor
|
||||
blocksper = range(self.__blocksper)
|
||||
for i, wheat in map(None, range(len(blocks)), blocks):
|
||||
# it shouldn't matter which of the n blocks we add chaff to, so for
|
||||
# ease of implementation, we'll just add them to the first count
|
||||
# blocks
|
||||
if i < count:
|
||||
serial, data, mac = wheat
|
||||
datasize = len(data)
|
||||
macsize = len(mac)
|
||||
addwheat = 1
|
||||
# add chaff to this block
|
||||
for j in blocksper:
|
||||
import sys
|
||||
chaffdata = self._randnum(datasize)
|
||||
chaffmac = self._randnum(macsize)
|
||||
chaff = (serial, chaffdata, chaffmac)
|
||||
# mix up the order, if the 5th bit is on then put the
|
||||
# wheat on the list
|
||||
if addwheat and bytes_to_long(self._randnum(16)) & 0x40:
|
||||
chaffedblocks.append(wheat)
|
||||
addwheat = 0
|
||||
chaffedblocks.append(chaff)
|
||||
if addwheat:
|
||||
chaffedblocks.append(wheat)
|
||||
else:
|
||||
# just add the wheat
|
||||
chaffedblocks.append(wheat)
|
||||
return chaffedblocks
|
||||
|
||||
def _randnum(self, size):
|
||||
# TBD: Not a very secure algorithm.
|
||||
# TBD: size * 2 to work around possible bug in RandomPool
|
||||
from Crypto.Util import randpool
|
||||
import time
|
||||
pool = randpool.RandomPool(size * 2)
|
||||
while size > pool.entropy:
|
||||
pass
|
||||
|
||||
# we now have enough entropy in the pool to get size bytes of random
|
||||
# data... well, probably
|
||||
return pool.get_bytes(size)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
text = """\
|
||||
We hold these truths to be self-evident, that all men are created equal, that
|
||||
they are endowed by their Creator with certain unalienable Rights, that among
|
||||
these are Life, Liberty, and the pursuit of Happiness. That to secure these
|
||||
rights, Governments are instituted among Men, deriving their just powers from
|
||||
the consent of the governed. That whenever any Form of Government becomes
|
||||
destructive of these ends, it is the Right of the People to alter or to
|
||||
abolish it, and to institute new Government, laying its foundation on such
|
||||
principles and organizing its powers in such form, as to them shall seem most
|
||||
likely to effect their Safety and Happiness.
|
||||
"""
|
||||
print 'Original text:\n=========='
|
||||
print text
|
||||
print '=========='
|
||||
|
||||
# first transform the text into packets
|
||||
blocks = [] ; size = 40
|
||||
for i in range(0, len(text), size):
|
||||
blocks.append( text[i:i+size] )
|
||||
|
||||
# now get MACs for all the text blocks. The key is obvious...
|
||||
print 'Calculating MACs...'
|
||||
from Crypto.Hash import HMAC, SHA
|
||||
key = 'Jefferson'
|
||||
macs = [HMAC.new(key, block, digestmod=SHA).digest()
|
||||
for block in blocks]
|
||||
|
||||
assert len(blocks) == len(macs)
|
||||
|
||||
# put these into a form acceptable as input to the chaffing procedure
|
||||
source = []
|
||||
m = map(None, range(len(blocks)), blocks, macs)
|
||||
print m
|
||||
for i, data, mac in m:
|
||||
source.append((i, data, mac))
|
||||
|
||||
# now chaff these
|
||||
print 'Adding chaff...'
|
||||
c = Chaff(factor=0.5, blocksper=2)
|
||||
chaffed = c.chaff(source)
|
||||
|
||||
from base64 import encodestring
|
||||
|
||||
# print the chaffed message blocks. meanwhile, separate the wheat from
|
||||
# the chaff
|
||||
|
||||
wheat = []
|
||||
print 'chaffed message blocks:'
|
||||
for i, data, mac in chaffed:
|
||||
# do the authentication
|
||||
h = HMAC.new(key, data, digestmod=SHA)
|
||||
pmac = h.digest()
|
||||
if pmac == mac:
|
||||
tag = '-->'
|
||||
wheat.append(data)
|
||||
else:
|
||||
tag = ' '
|
||||
# base64 adds a trailing newline
|
||||
print tag, '%3d' % i, \
|
||||
repr(data), encodestring(mac)[:-1]
|
||||
|
||||
# now decode the message packets and check it against the original text
|
||||
print 'Undigesting wheat...'
|
||||
newtext = "".join(wheat)
|
||||
if newtext == text:
|
||||
print 'They match!'
|
||||
else:
|
||||
print 'They differ!'
|
||||
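Usage sketch (illustrative, not part of the committed files): a condensed version of the __main__ demo above — wheat packets are MACed with a shared key, chaffed, and then winnowed by recomputing the MAC; the key and data strings are placeholders.

from Crypto.Hash import HMAC, SHA
from Crypto.Protocol.Chaffing import Chaff

key = 'shared-secret'
packets = [(i, data, HMAC.new(key, data, digestmod=SHA).digest())
           for i, data in enumerate(['first block ', 'second block'])]
chaffed = Chaff(factor=1.0, blocksper=2).chaff(packets)
# winnowing: keep only the packets whose MAC verifies under the shared key
wheat = [d for i, d, m in chaffed
         if HMAC.new(key, d, digestmod=SHA).digest() == m]
assert ''.join(wheat) == 'first block second block'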
17
python/gdata/Crypto/Protocol/__init__.py
Normal file
@@ -0,0 +1,17 @@

"""Cryptographic protocols

Implements various cryptographic protocols. (Don't expect to find
network protocols here.)

Crypto.Protocol.AllOrNothing   Transforms a message into a set of message
                               blocks, such that the blocks can be
                               recombined to get the message back.

Crypto.Protocol.Chaffing       Takes a set of authenticated message blocks
                               (the wheat) and adds a number of
                               randomly generated blocks (the chaff).
"""

__all__ = ['AllOrNothing', 'Chaffing']
__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:23:21 akuchling Exp $"
238
python/gdata/Crypto/PublicKey/DSA.py
Normal file
@@ -0,0 +1,238 @@
|
||||
|
||||
#
|
||||
# DSA.py : Digital Signature Algorithm
|
||||
#
|
||||
# Part of the Python Cryptography Toolkit
|
||||
#
|
||||
# Distribute and use freely; there are no restrictions on further
|
||||
# dissemination and usage except those imposed by the laws of your
|
||||
# country of residence. This software is provided "as is" without
|
||||
# warranty of fitness for use or suitability for any purpose, express
|
||||
# or implied. Use at your own risk or not at all.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $"
|
||||
|
||||
from Crypto.PublicKey.pubkey import *
|
||||
from Crypto.Util import number
|
||||
from Crypto.Util.number import bytes_to_long, long_to_bytes
|
||||
from Crypto.Hash import SHA
|
||||
|
||||
try:
|
||||
from Crypto.PublicKey import _fastmath
|
||||
except ImportError:
|
||||
_fastmath = None
|
||||
|
||||
class error (Exception):
|
||||
pass
|
||||
|
||||
def generateQ(randfunc):
|
||||
S=randfunc(20)
|
||||
hash1=SHA.new(S).digest()
|
||||
hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest()
|
||||
q = bignum(0)
|
||||
for i in range(0,20):
|
||||
c=ord(hash1[i])^ord(hash2[i])
|
||||
if i==0:
|
||||
c=c | 128
|
||||
if i==19:
|
||||
c= c | 1
|
||||
q=q*256+c
|
||||
while (not isPrime(q)):
|
||||
q=q+2
|
||||
if pow(2,159L) < q < pow(2,160L):
|
||||
return S, q
|
||||
raise error, 'Bad q value generated'
|
||||
|
||||
def generate(bits, randfunc, progress_func=None):
|
||||
"""generate(bits:int, randfunc:callable, progress_func:callable)
|
||||
|
||||
Generate a DSA key of length 'bits', using 'randfunc' to get
|
||||
random data and 'progress_func', if present, to display
|
||||
the progress of the key generation.
|
||||
"""
|
||||
|
||||
if bits<160:
|
||||
raise error, 'Key length <160 bits'
|
||||
obj=DSAobj()
|
||||
# Generate string S and prime q
|
||||
if progress_func:
|
||||
progress_func('p,q\n')
|
||||
while (1):
|
||||
S, obj.q = generateQ(randfunc)
|
||||
n=(bits-1)/160
|
||||
C, N, V = 0, 2, {}
|
||||
b=(obj.q >> 5) & 15
|
||||
powb=pow(bignum(2), b)
|
||||
powL1=pow(bignum(2), bits-1)
|
||||
while C<4096:
|
||||
for k in range(0, n+1):
|
||||
V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
|
||||
W=V[n] % powb
|
||||
for k in range(n-1, -1, -1):
|
||||
W=(W<<160L)+V[k]
|
||||
X=W+powL1
|
||||
p=X-(X%(2*obj.q)-1)
|
||||
if powL1<=p and isPrime(p):
|
||||
break
|
||||
C, N = C+1, N+n+1
|
||||
if C<4096:
|
||||
break
|
||||
if progress_func:
|
||||
progress_func('4096 multiples failed\n')
|
||||
|
||||
obj.p = p
|
||||
power=(p-1)/obj.q
|
||||
if progress_func:
|
||||
progress_func('h,g\n')
|
||||
while (1):
|
||||
h=bytes_to_long(randfunc(bits)) % (p-1)
|
||||
g=pow(h, power, p)
|
||||
if 1<h<p-1 and g>1:
|
||||
break
|
||||
obj.g=g
|
||||
if progress_func:
|
||||
progress_func('x,y\n')
|
||||
while (1):
|
||||
x=bytes_to_long(randfunc(20))
|
||||
if 0 < x < obj.q:
|
||||
break
|
||||
obj.x, obj.y = x, pow(g, x, p)
|
||||
return obj
|
||||
|
||||
def construct(tuple):
|
||||
"""construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj
|
||||
Construct a DSA object from a 4- or 5-tuple of numbers.
|
||||
"""
|
||||
obj=DSAobj()
|
||||
if len(tuple) not in [4,5]:
|
||||
raise error, 'argument for construct() wrong length'
|
||||
for i in range(len(tuple)):
|
||||
field = obj.keydata[i]
|
||||
setattr(obj, field, tuple[i])
|
||||
return obj
|
||||
|
||||
class DSAobj(pubkey):
|
||||
keydata=['y', 'g', 'p', 'q', 'x']
|
||||
|
||||
def _encrypt(self, s, Kstr):
|
||||
raise error, 'DSA algorithm cannot encrypt data'
|
||||
|
||||
def _decrypt(self, s):
|
||||
raise error, 'DSA algorithm cannot decrypt data'
|
||||
|
||||
def _sign(self, M, K):
|
||||
if (K<2 or self.q<=K):
|
||||
raise error, 'K is not between 2 and q'
|
||||
r=pow(self.g, K, self.p) % self.q
|
||||
s=(inverse(K, self.q)*(M+self.x*r)) % self.q
|
||||
return (r,s)
|
||||
|
||||
def _verify(self, M, sig):
|
||||
r, s = sig
|
||||
if r<=0 or r>=self.q or s<=0 or s>=self.q:
|
||||
return 0
|
||||
w=inverse(s, self.q)
|
||||
u1, u2 = (M*w) % self.q, (r*w) % self.q
|
||||
v1 = pow(self.g, u1, self.p)
|
||||
v2 = pow(self.y, u2, self.p)
|
||||
v = ((v1*v2) % self.p)
|
||||
v = v % self.q
|
||||
if v==r:
|
||||
return 1
|
||||
return 0
|
||||
|
||||
def size(self):
|
||||
"Return the maximum number of bits that can be handled by this key."
|
||||
return number.size(self.p) - 1
|
||||
|
||||
def has_private(self):
|
||||
"""Return a Boolean denoting whether the object contains
|
||||
private components."""
|
||||
if hasattr(self, 'x'):
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def can_sign(self):
|
||||
"""Return a Boolean value recording whether this algorithm can generate signatures."""
|
||||
return 1
|
||||
|
||||
def can_encrypt(self):
|
||||
"""Return a Boolean value recording whether this algorithm can encrypt data."""
|
||||
return 0
|
||||
|
||||
def publickey(self):
|
||||
"""Return a new key object containing only the public information."""
|
||||
return construct((self.y, self.g, self.p, self.q))
|
||||
|
||||
object=DSAobj
|
||||
|
||||
generate_py = generate
|
||||
construct_py = construct
|
||||
|
||||
class DSAobj_c(pubkey):
|
||||
keydata = ['y', 'g', 'p', 'q', 'x']
|
||||
|
||||
def __init__(self, key):
|
||||
self.key = key
|
||||
|
||||
def __getattr__(self, attr):
|
||||
if attr in self.keydata:
|
||||
return getattr(self.key, attr)
|
||||
else:
|
||||
if self.__dict__.has_key(attr):
|
||||
self.__dict__[attr]
|
||||
else:
|
||||
raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr)
|
||||
|
||||
def __getstate__(self):
|
||||
d = {}
|
||||
for k in self.keydata:
|
||||
if hasattr(self.key, k):
|
||||
d[k]=getattr(self.key, k)
|
||||
return d
|
||||
|
||||
def __setstate__(self, state):
|
||||
y,g,p,q = state['y'], state['g'], state['p'], state['q']
|
||||
if not state.has_key('x'):
|
||||
self.key = _fastmath.dsa_construct(y,g,p,q)
|
||||
else:
|
||||
x = state['x']
|
||||
self.key = _fastmath.dsa_construct(y,g,p,q,x)
|
||||
|
||||
def _sign(self, M, K):
|
||||
return self.key._sign(M, K)
|
||||
|
||||
def _verify(self, M, (r, s)):
|
||||
return self.key._verify(M, r, s)
|
||||
|
||||
def size(self):
|
||||
return self.key.size()
|
||||
|
||||
def has_private(self):
|
||||
return self.key.has_private()
|
||||
|
||||
def publickey(self):
|
||||
return construct_c((self.key.y, self.key.g, self.key.p, self.key.q))
|
||||
|
||||
def can_sign(self):
|
||||
return 1
|
||||
|
||||
def can_encrypt(self):
|
||||
return 0
|
||||
|
||||
def generate_c(bits, randfunc, progress_func=None):
|
||||
obj = generate_py(bits, randfunc, progress_func)
|
||||
y,g,p,q,x = obj.y, obj.g, obj.p, obj.q, obj.x
|
||||
return construct_c((y,g,p,q,x))
|
||||
|
||||
def construct_c(tuple):
|
||||
key = apply(_fastmath.dsa_construct, tuple)
|
||||
return DSAobj_c(key)
|
||||
|
||||
if _fastmath:
|
||||
#print "using C version of DSA"
|
||||
generate = generate_c
|
||||
construct = construct_c
|
||||
error = _fastmath.error
|
||||
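Usage sketch (illustrative, not part of the committed files): sign/verify through the pubkey wrapper methods; the key size is deliberately small for a demo, RandomPool is the toolkit's own (not especially strong) entropy source, and M is a SHA-1 digest so it fits below the 160-bit q.

from Crypto.PublicKey import DSA
from Crypto.Hash import SHA
from Crypto.Util.randpool import RandomPool
from Crypto.Util.number import bytes_to_long

pool = RandomPool()
key = DSA.generate(512, pool.get_bytes)    # demo-sized key; real use wants a larger modulus
m = bytes_to_long(SHA.new('release the build').digest())
k = bytes_to_long(pool.get_bytes(19))      # fresh per-signature secret, below q
sig = key.sign(m, k)
assert key.publickey().verify(m, sig)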
132
python/gdata/Crypto/PublicKey/ElGamal.py
Normal file
@@ -0,0 +1,132 @@
|
||||
#
|
||||
# ElGamal.py : ElGamal encryption/decryption and signatures
|
||||
#
|
||||
# Part of the Python Cryptography Toolkit
|
||||
#
|
||||
# Distribute and use freely; there are no restrictions on further
|
||||
# dissemination and usage except those imposed by the laws of your
|
||||
# country of residence. This software is provided "as is" without
|
||||
# warranty of fitness for use or suitability for any purpose, express
|
||||
# or implied. Use at your own risk or not at all.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: ElGamal.py,v 1.9 2003/04/04 19:44:26 akuchling Exp $"
|
||||
|
||||
from Crypto.PublicKey.pubkey import *
|
||||
from Crypto.Util import number
|
||||
|
||||
class error (Exception):
|
||||
pass
|
||||
|
||||
# Generate an ElGamal key with N bits
|
||||
def generate(bits, randfunc, progress_func=None):
|
||||
"""generate(bits:int, randfunc:callable, progress_func:callable)
|
||||
|
||||
Generate an ElGamal key of length 'bits', using 'randfunc' to get
|
||||
random data and 'progress_func', if present, to display
|
||||
the progress of the key generation.
|
||||
"""
|
||||
obj=ElGamalobj()
|
||||
# Generate prime p
|
||||
if progress_func:
|
||||
progress_func('p\n')
|
||||
obj.p=bignum(getPrime(bits, randfunc))
|
||||
# Generate random number g
|
||||
if progress_func:
|
||||
progress_func('g\n')
|
||||
size=bits-1-(ord(randfunc(1)) & 63) # g will be from 1--64 bits smaller than p
|
||||
if size<1:
|
||||
size=bits-1
|
||||
while (1):
|
||||
obj.g=bignum(getPrime(size, randfunc))
|
||||
if obj.g < obj.p:
|
||||
break
|
||||
size=(size+1) % bits
|
||||
if size==0:
|
||||
size=4
|
||||
# Generate random number x
|
||||
if progress_func:
|
||||
progress_func('x\n')
|
||||
while (1):
|
||||
size=bits-1-ord(randfunc(1)) # x will be from 1 to 256 bits smaller than p
|
||||
if size>2:
|
||||
break
|
||||
while (1):
|
||||
obj.x=bignum(getPrime(size, randfunc))
|
||||
if obj.x < obj.p:
|
||||
break
|
||||
size = (size+1) % bits
|
||||
if size==0:
|
||||
size=4
|
||||
if progress_func:
|
||||
progress_func('y\n')
|
||||
obj.y = pow(obj.g, obj.x, obj.p)
|
||||
return obj
|
||||
|
||||
def construct(tuple):
|
||||
"""construct(tuple:(long,long,long,long)|(long,long,long,long,long)))
|
||||
: ElGamalobj
|
||||
Construct an ElGamal key from a 3- or 4-tuple of numbers.
|
||||
"""
|
||||
|
||||
obj=ElGamalobj()
|
||||
if len(tuple) not in [3,4]:
|
||||
raise error, 'argument for construct() wrong length'
|
||||
for i in range(len(tuple)):
|
||||
field = obj.keydata[i]
|
||||
setattr(obj, field, tuple[i])
|
||||
return obj
|
||||
|
||||
class ElGamalobj(pubkey):
|
||||
keydata=['p', 'g', 'y', 'x']
|
||||
|
||||
def _encrypt(self, M, K):
|
||||
a=pow(self.g, K, self.p)
|
||||
b=( M*pow(self.y, K, self.p) ) % self.p
|
||||
return ( a,b )
|
||||
|
||||
def _decrypt(self, M):
|
||||
if (not hasattr(self, 'x')):
|
||||
raise error, 'Private key not available in this object'
|
||||
ax=pow(M[0], self.x, self.p)
|
||||
plaintext=(M[1] * inverse(ax, self.p ) ) % self.p
|
||||
return plaintext
|
||||
|
||||
def _sign(self, M, K):
|
||||
if (not hasattr(self, 'x')):
|
||||
raise error, 'Private key not available in this object'
|
||||
p1=self.p-1
|
||||
if (GCD(K, p1)!=1):
|
||||
raise error, 'Bad K value: GCD(K,p-1)!=1'
|
||||
a=pow(self.g, K, self.p)
|
||||
t=(M-self.x*a) % p1
|
||||
while t<0: t=t+p1
|
||||
b=(t*inverse(K, p1)) % p1
|
||||
return (a, b)
|
||||
|
||||
def _verify(self, M, sig):
|
||||
v1=pow(self.y, sig[0], self.p)
|
||||
v1=(v1*pow(sig[0], sig[1], self.p)) % self.p
|
||||
v2=pow(self.g, M, self.p)
|
||||
if v1==v2:
|
||||
return 1
|
||||
return 0
|
||||
|
||||
def size(self):
|
||||
"Return the maximum number of bits that can be handled by this key."
|
||||
return number.size(self.p) - 1
|
||||
|
||||
def has_private(self):
|
||||
"""Return a Boolean denoting whether the object contains
|
||||
private components."""
|
||||
if hasattr(self, 'x'):
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def publickey(self):
|
||||
"""Return a new key object containing only the public information."""
|
||||
return construct((self.p, self.g, self.y))
|
||||
|
||||
|
||||
object=ElGamalobj
|
||||
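Usage sketch (illustrative, not part of the committed files): encrypt/decrypt through the pubkey wrapper, which converts strings to longs and back; the key size and message are placeholders and K is a fresh per-message random value.

from Crypto.PublicKey import ElGamal
from Crypto.Util.randpool import RandomPool
from Crypto.Util.number import bytes_to_long

pool = RandomPool()
key = ElGamal.generate(512, pool.get_bytes)   # demo-sized key
k = bytes_to_long(pool.get_bytes(16))         # per-message random value
ciphertext = key.encrypt('meet at noon', k)   # (a, b) tuple of byte strings
assert key.decrypt(ciphertext) == 'meet at noon'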
256
python/gdata/Crypto/PublicKey/RSA.py
Normal file
@@ -0,0 +1,256 @@
|
||||
#
|
||||
# RSA.py : RSA encryption/decryption
|
||||
#
|
||||
# Part of the Python Cryptography Toolkit
|
||||
#
|
||||
# Distribute and use freely; there are no restrictions on further
|
||||
# dissemination and usage except those imposed by the laws of your
|
||||
# country of residence. This software is provided "as is" without
|
||||
# warranty of fitness for use or suitability for any purpose, express
|
||||
# or implied. Use at your own risk or not at all.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: RSA.py,v 1.20 2004/05/06 12:52:54 akuchling Exp $"
|
||||
|
||||
from Crypto.PublicKey import pubkey
|
||||
from Crypto.Util import number
|
||||
|
||||
try:
|
||||
from Crypto.PublicKey import _fastmath
|
||||
except ImportError:
|
||||
_fastmath = None
|
||||
|
||||
class error (Exception):
|
||||
pass
|
||||
|
||||
def generate(bits, randfunc, progress_func=None):
|
||||
"""generate(bits:int, randfunc:callable, progress_func:callable)
|
||||
|
||||
Generate an RSA key of length 'bits', using 'randfunc' to get
|
||||
random data and 'progress_func', if present, to display
|
||||
the progress of the key generation.
|
||||
"""
|
||||
obj=RSAobj()
|
||||
|
||||
# Generate the prime factors of n
|
||||
if progress_func:
|
||||
progress_func('p,q\n')
|
||||
p = q = 1L
|
||||
while number.size(p*q) < bits:
|
||||
p = pubkey.getPrime(bits/2, randfunc)
|
||||
q = pubkey.getPrime(bits/2, randfunc)
|
||||
|
||||
# p shall be smaller than q (for calc of u)
|
||||
if p > q:
|
||||
(p, q)=(q, p)
|
||||
obj.p = p
|
||||
obj.q = q
|
||||
|
||||
if progress_func:
|
||||
progress_func('u\n')
|
||||
obj.u = pubkey.inverse(obj.p, obj.q)
|
||||
obj.n = obj.p*obj.q
|
||||
|
||||
obj.e = 65537L
|
||||
if progress_func:
|
||||
progress_func('d\n')
|
||||
obj.d=pubkey.inverse(obj.e, (obj.p-1)*(obj.q-1))
|
||||
|
||||
assert bits <= 1+obj.size(), "Generated key is too small"
|
||||
|
||||
return obj
|
||||
|
||||
def construct(tuple):
|
||||
"""construct(tuple:(long,) : RSAobj
|
||||
Construct an RSA object from a 2-, 3-, 5-, or 6-tuple of numbers.
|
||||
"""
|
||||
|
||||
obj=RSAobj()
|
||||
if len(tuple) not in [2,3,5,6]:
|
||||
raise error, 'argument for construct() wrong length'
|
||||
for i in range(len(tuple)):
|
||||
field = obj.keydata[i]
|
||||
setattr(obj, field, tuple[i])
|
||||
if len(tuple) >= 5:
|
||||
# Ensure p is smaller than q
|
||||
if obj.p>obj.q:
|
||||
(obj.p, obj.q)=(obj.q, obj.p)
|
||||
|
||||
if len(tuple) == 5:
|
||||
# u not supplied, so we're going to have to compute it.
|
||||
obj.u=pubkey.inverse(obj.p, obj.q)
|
||||
|
||||
return obj
|
||||
|
||||
class RSAobj(pubkey.pubkey):
|
||||
keydata = ['n', 'e', 'd', 'p', 'q', 'u']
|
||||
def _encrypt(self, plaintext, K=''):
|
||||
if self.n<=plaintext:
|
||||
raise error, 'Plaintext too large'
|
||||
return (pow(plaintext, self.e, self.n),)
|
||||
|
||||
def _decrypt(self, ciphertext):
|
||||
if (not hasattr(self, 'd')):
|
||||
raise error, 'Private key not available in this object'
|
||||
if self.n<=ciphertext[0]:
|
||||
raise error, 'Ciphertext too large'
|
||||
return pow(ciphertext[0], self.d, self.n)
|
||||
|
||||
def _sign(self, M, K=''):
|
||||
return (self._decrypt((M,)),)
|
||||
|
||||
def _verify(self, M, sig):
|
||||
m2=self._encrypt(sig[0])
|
||||
if m2[0]==M:
|
||||
return 1
|
||||
else: return 0
|
||||
|
||||
def _blind(self, M, B):
|
||||
tmp = pow(B, self.e, self.n)
|
||||
return (M * tmp) % self.n
|
||||
|
||||
def _unblind(self, M, B):
|
||||
tmp = pubkey.inverse(B, self.n)
|
||||
return (M * tmp) % self.n
|
||||
|
||||
def can_blind (self):
|
||||
"""can_blind() : bool
|
||||
Return a Boolean value recording whether this algorithm can
|
||||
blind data. (This does not imply that this
|
||||
particular key object has the private information required to
|
||||
to blind a message.)
|
||||
"""
|
||||
return 1
|
||||
|
||||
def size(self):
|
||||
"""size() : int
|
||||
Return the maximum number of bits that can be handled by this key.
|
||||
"""
|
||||
return number.size(self.n) - 1
|
||||
|
||||
def has_private(self):
|
||||
"""has_private() : bool
|
||||
Return a Boolean denoting whether the object contains
|
||||
private components.
|
||||
"""
|
||||
if hasattr(self, 'd'):
|
||||
return 1
|
||||
else: return 0
|
||||
|
||||
def publickey(self):
|
||||
"""publickey(): RSAobj
|
||||
Return a new key object containing only the public key information.
|
||||
"""
|
||||
return construct((self.n, self.e))
|
||||
|
||||
class RSAobj_c(pubkey.pubkey):
|
||||
keydata = ['n', 'e', 'd', 'p', 'q', 'u']
|
||||
|
||||
def __init__(self, key):
|
||||
self.key = key
|
||||
|
||||
def __getattr__(self, attr):
|
||||
if attr in self.keydata:
|
||||
return getattr(self.key, attr)
|
||||
else:
|
||||
if self.__dict__.has_key(attr):
|
||||
self.__dict__[attr]
|
||||
else:
|
||||
raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr)
|
||||
|
||||
def __getstate__(self):
|
||||
d = {}
|
||||
for k in self.keydata:
|
||||
if hasattr(self.key, k):
|
||||
d[k]=getattr(self.key, k)
|
||||
return d
|
||||
|
||||
def __setstate__(self, state):
|
||||
n,e = state['n'], state['e']
|
||||
if not state.has_key('d'):
|
||||
self.key = _fastmath.rsa_construct(n,e)
|
||||
else:
|
||||
d = state['d']
|
||||
if not state.has_key('q'):
|
||||
self.key = _fastmath.rsa_construct(n,e,d)
|
||||
else:
|
||||
p, q, u = state['p'], state['q'], state['u']
|
||||
self.key = _fastmath.rsa_construct(n,e,d,p,q,u)
|
||||
|
||||
def _encrypt(self, plain, K):
|
||||
return (self.key._encrypt(plain),)
|
||||
|
||||
def _decrypt(self, cipher):
|
||||
return self.key._decrypt(cipher[0])
|
||||
|
||||
def _sign(self, M, K):
|
||||
return (self.key._sign(M),)
|
||||
|
||||
def _verify(self, M, sig):
|
||||
return self.key._verify(M, sig[0])
|
||||
|
||||
def _blind(self, M, B):
|
||||
return self.key._blind(M, B)
|
||||
|
||||
def _unblind(self, M, B):
|
||||
return self.key._unblind(M, B)
|
||||
|
||||
def can_blind (self):
|
||||
return 1
|
||||
|
||||
def size(self):
|
||||
return self.key.size()
|
||||
|
||||
def has_private(self):
|
||||
return self.key.has_private()
|
||||
|
||||
def publickey(self):
|
||||
return construct_c((self.key.n, self.key.e))
|
||||
|
||||
def generate_c(bits, randfunc, progress_func = None):
|
||||
# Generate the prime factors of n
|
||||
if progress_func:
|
||||
progress_func('p,q\n')
|
||||
|
||||
p = q = 1L
|
||||
while number.size(p*q) < bits:
|
||||
p = pubkey.getPrime(bits/2, randfunc)
|
||||
q = pubkey.getPrime(bits/2, randfunc)
|
||||
|
||||
# p shall be smaller than q (for calc of u)
|
||||
if p > q:
|
||||
(p, q)=(q, p)
|
||||
if progress_func:
|
||||
progress_func('u\n')
|
||||
u=pubkey.inverse(p, q)
|
||||
n=p*q
|
||||
|
||||
e = 65537L
|
||||
if progress_func:
|
||||
progress_func('d\n')
|
||||
d=pubkey.inverse(e, (p-1)*(q-1))
|
||||
key = _fastmath.rsa_construct(n,e,d,p,q,u)
|
||||
obj = RSAobj_c(key)
|
||||
|
||||
## print p
|
||||
## print q
|
||||
## print number.size(p), number.size(q), number.size(q*p),
|
||||
## print obj.size(), bits
|
||||
assert bits <= 1+obj.size(), "Generated key is too small"
|
||||
return obj
|
||||
|
||||
|
||||
def construct_c(tuple):
|
||||
key = apply(_fastmath.rsa_construct, tuple)
|
||||
return RSAobj_c(key)
|
||||
|
||||
object = RSAobj
|
||||
|
||||
generate_py = generate
|
||||
construct_py = construct
|
||||
|
||||
if _fastmath:
|
||||
#print "using C version of RSA"
|
||||
generate = generate_c
|
||||
construct = construct_c
|
||||
error = _fastmath.error
|
||||
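Usage sketch (illustrative, not part of the committed files): public-key encryption and private decryption through the wrapper methods; the modulus size and message are placeholders, and the second argument to encrypt() is ignored by RSA.

from Crypto.PublicKey import RSA
from Crypto.Util.randpool import RandomPool

pool = RandomPool()
key = RSA.generate(768, pool.get_bytes)   # demo-sized modulus
pub = key.publickey()                     # public components only (n, e)
ciphertext = pub.encrypt('pay the invoice', '')
assert key.decrypt(ciphertext) == 'pay the invoice'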
17
python/gdata/Crypto/PublicKey/__init__.py
Normal file
@@ -0,0 +1,17 @@
"""Public-key encryption and signature algorithms.

Public-key encryption uses two different keys, one for encryption and
one for decryption. The encryption key can be made public, and the
decryption key is kept private. Many public-key algorithms can also
be used to sign messages, and some can *only* be used for signatures.

Crypto.PublicKey.DSA        Digital Signature Algorithm. (Signature only)
Crypto.PublicKey.ElGamal    (Signing and encryption)
Crypto.PublicKey.RSA        (Signing, encryption, and blinding)
Crypto.PublicKey.qNEW       (Signature only)

"""

__all__ = ['RSA', 'DSA', 'ElGamal', 'qNEW']
__revision__ = "$Id: __init__.py,v 1.4 2003/04/03 20:27:13 akuchling Exp $"
172
python/gdata/Crypto/PublicKey/pubkey.py
Normal file
@@ -0,0 +1,172 @@
|
||||
#
|
||||
# pubkey.py : Internal functions for public key operations
|
||||
#
|
||||
# Part of the Python Cryptography Toolkit
|
||||
#
|
||||
# Distribute and use freely; there are no restrictions on further
|
||||
# dissemination and usage except those imposed by the laws of your
|
||||
# country of residence. This software is provided "as is" without
|
||||
# warranty of fitness for use or suitability for any purpose, express
|
||||
# or implied. Use at your own risk or not at all.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: pubkey.py,v 1.11 2003/04/03 20:36:14 akuchling Exp $"
|
||||
|
||||
import types, warnings
|
||||
from Crypto.Util.number import *
|
||||
|
||||
# Basic public key class
|
||||
class pubkey:
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def __getstate__(self):
|
||||
"""To keep key objects platform-independent, the key data is
|
||||
converted to standard Python long integers before being
|
||||
written out. It will then be reconverted as necessary on
|
||||
restoration."""
|
||||
d=self.__dict__
|
||||
for key in self.keydata:
|
||||
if d.has_key(key): d[key]=long(d[key])
|
||||
return d
|
||||
|
||||
def __setstate__(self, d):
|
||||
"""On unpickling a key object, the key data is converted to the big
|
||||
number representation being used, whether that is Python long
|
||||
integers, MPZ objects, or whatever."""
|
||||
for key in self.keydata:
|
||||
if d.has_key(key): self.__dict__[key]=bignum(d[key])
|
||||
|
||||
def encrypt(self, plaintext, K):
|
||||
"""encrypt(plaintext:string|long, K:string|long) : tuple
|
||||
Encrypt the string or integer plaintext. K is a random
|
||||
parameter required by some algorithms.
|
||||
"""
|
||||
wasString=0
|
||||
if isinstance(plaintext, types.StringType):
|
||||
plaintext=bytes_to_long(plaintext) ; wasString=1
|
||||
if isinstance(K, types.StringType):
|
||||
K=bytes_to_long(K)
|
||||
ciphertext=self._encrypt(plaintext, K)
|
||||
if wasString: return tuple(map(long_to_bytes, ciphertext))
|
||||
else: return ciphertext
|
||||
|
||||
def decrypt(self, ciphertext):
|
||||
"""decrypt(ciphertext:tuple|string|long): string
|
||||
Decrypt 'ciphertext' using this key.
|
||||
"""
|
||||
wasString=0
|
||||
if not isinstance(ciphertext, types.TupleType):
|
||||
ciphertext=(ciphertext,)
|
||||
if isinstance(ciphertext[0], types.StringType):
|
||||
ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1
|
||||
plaintext=self._decrypt(ciphertext)
|
||||
if wasString: return long_to_bytes(plaintext)
|
||||
else: return plaintext
|
||||
|
||||
def sign(self, M, K):
|
||||
"""sign(M : string|long, K:string|long) : tuple
|
||||
Return a tuple containing the signature for the message M.
|
||||
K is a random parameter required by some algorithms.
|
||||
"""
|
||||
if (not self.has_private()):
|
||||
raise error, 'Private key not available in this object'
|
||||
if isinstance(M, types.StringType): M=bytes_to_long(M)
|
||||
if isinstance(K, types.StringType): K=bytes_to_long(K)
|
||||
return self._sign(M, K)
|
||||
|
||||
def verify (self, M, signature):
|
||||
"""verify(M:string|long, signature:tuple) : bool
|
||||
Verify that the signature is valid for the message M;
|
||||
returns true if the signature checks out.
|
||||
"""
|
||||
if isinstance(M, types.StringType): M=bytes_to_long(M)
|
||||
return self._verify(M, signature)
|
||||
|
||||
# alias to compensate for the old validate() name
|
||||
def validate (self, M, signature):
|
||||
warnings.warn("validate() method name is obsolete; use verify()",
|
||||
DeprecationWarning)
|
||||
|
||||
def blind(self, M, B):
|
||||
"""blind(M : string|long, B : string|long) : string|long
|
||||
Blind message M using blinding factor B.
|
||||
"""
|
||||
wasString=0
|
||||
if isinstance(M, types.StringType):
|
||||
M=bytes_to_long(M) ; wasString=1
|
||||
if isinstance(B, types.StringType): B=bytes_to_long(B)
|
||||
blindedmessage=self._blind(M, B)
|
||||
if wasString: return long_to_bytes(blindedmessage)
|
||||
else: return blindedmessage
|
||||
|
||||
def unblind(self, M, B):
|
||||
"""unblind(M : string|long, B : string|long) : string|long
|
||||
Unblind message M using blinding factor B.
|
||||
"""
|
||||
wasString=0
|
||||
if isinstance(M, types.StringType):
|
||||
M=bytes_to_long(M) ; wasString=1
|
||||
if isinstance(B, types.StringType): B=bytes_to_long(B)
|
||||
unblindedmessage=self._unblind(M, B)
|
||||
if wasString: return long_to_bytes(unblindedmessage)
|
||||
else: return unblindedmessage
|
||||
|
||||
|
||||
# The following methods will usually be left alone, except for
|
||||
# signature-only algorithms. They both return Boolean values
|
||||
# recording whether this key's algorithm can sign and encrypt.
|
||||
def can_sign (self):
|
||||
"""can_sign() : bool
|
||||
Return a Boolean value recording whether this algorithm can
|
||||
generate signatures. (This does not imply that this
|
||||
particular key object has the private information required to
|
||||
to generate a signature.)
|
||||
"""
|
||||
return 1
|
||||
|
||||
def can_encrypt (self):
|
||||
"""can_encrypt() : bool
|
||||
Return a Boolean value recording whether this algorithm can
|
||||
encrypt data. (This does not imply that this
|
||||
particular key object has the private information required to
|
||||
to decrypt a message.)
|
||||
"""
|
||||
return 1
|
||||
|
||||
def can_blind (self):
|
||||
"""can_blind() : bool
|
||||
Return a Boolean value recording whether this algorithm can
|
||||
blind data. (This does not imply that this
|
||||
particular key object has the private information required to
|
||||
to blind a message.)
|
||||
"""
|
||||
return 0
|
||||
|
||||
# The following methods will certainly be overridden by
|
||||
# subclasses.
|
||||
|
||||
def size (self):
|
||||
"""size() : int
|
||||
Return the maximum number of bits that can be handled by this key.
|
||||
"""
|
||||
return 0
|
||||
|
||||
def has_private (self):
|
||||
"""has_private() : bool
|
||||
Return a Boolean denoting whether the object contains
|
||||
private components.
|
||||
"""
|
||||
return 0
|
||||
|
||||
def publickey (self):
|
||||
"""publickey(): object
|
||||
Return a new key object containing only the public information.
|
||||
"""
|
||||
return self
|
||||
|
||||
def __eq__ (self, other):
|
||||
"""__eq__(other): 0, 1
|
||||
Compare us to other for equality.
|
||||
"""
|
||||
return self.__getstate__() == other.__getstate__()
|
||||
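Usage sketch (illustrative, not part of the committed files): the wrapper methods above lean on the byte-string/long round trip from Crypto.Util.number; a tiny illustration of that conversion.

from Crypto.Util.number import bytes_to_long, long_to_bytes

n = bytes_to_long('hi')            # 0x6869 == 26729L
assert long_to_bytes(n) == 'hi'    # the round trip encrypt()/decrypt() rely on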
170
python/gdata/Crypto/PublicKey/qNEW.py
Normal file
@@ -0,0 +1,170 @@
|
||||
#
|
||||
# qNEW.py : The q-NEW signature algorithm.
|
||||
#
|
||||
# Part of the Python Cryptography Toolkit
|
||||
#
|
||||
# Distribute and use freely; there are no restrictions on further
|
||||
# dissemination and usage except those imposed by the laws of your
|
||||
# country of residence. This software is provided "as is" without
|
||||
# warranty of fitness for use or suitability for any purpose, express
|
||||
# or implied. Use at your own risk or not at all.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
|
||||
|
||||
from Crypto.PublicKey import pubkey
|
||||
from Crypto.Util.number import *
|
||||
from Crypto.Hash import SHA
|
||||
|
||||
class error (Exception):
|
||||
pass
|
||||
|
||||
HASHBITS = 160 # Size of SHA digests
|
||||
|
||||
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)

    Generate a qNEW key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    obj=qNEWobj()

    # Generate prime numbers p and q.  q is a 160-bit prime
    # number.  p is another prime number (the modulus) whose bit
    # size is chosen by the caller, and is generated so that p-1
    # is a multiple of q.
    #
    # Note that only a single seed is used to
    # generate p and q; if someone generates a key for you, you can
    # use the seed to duplicate the key generation.  This can
    # protect you from someone generating values of p,q that have
    # some special form that's easy to break.
    if progress_func:
        progress_func('p,q\n')
    while (1):
        obj.q = getPrime(160, randfunc)
        # assert pow(2, 159L)<obj.q<pow(2, 160L)
        obj.seed = S = long_to_bytes(obj.q)
        C, N, V = 0, 2, {}
        # Compute b and n such that bits-1 = b + n*HASHBITS
        n= (bits-1) / HASHBITS
        b= (bits-1) % HASHBITS ; powb=2L << b
        powL1=pow(long(2), bits-1)
        while C<4096:
            # The V array will contain (bits-1) bits of random
            # data, that are assembled to produce a candidate
            # value for p.
            for k in range(0, n+1):
                V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
            p = V[n] % powb
            for k in range(n-1, -1, -1):
                p= (p << long(HASHBITS) )+V[k]
            p = p+powL1                 # Ensure the high bit is set

            # Ensure that p-1 is a multiple of q
            p = p - (p % (2*obj.q)-1)

            # If p is still the right size, and it's prime, we're done!
            if powL1<=p and isPrime(p):
                break

            # Otherwise, increment the counter and try again
            C, N = C+1, N+n+1
        if C<4096:
            break                       # Ended early, so exit the while loop
        if progress_func:
            progress_func('4096 values of p tried\n')

    obj.p = p
    power=(p-1)/obj.q

    # Next parameter: g = h**((p-1)/q) mod p, such that h is any
    # number <p-1, and g>1.  g is kept; h can be discarded.
    if progress_func:
        progress_func('h,g\n')
    while (1):
        h=bytes_to_long(randfunc(bits)) % (p-1)
        g=pow(h, power, p)
        if 1<h<p-1 and g>1:
            break
    obj.g=g

    # x is the private key information, and is
    # just a random number between 0 and q.
    # y=g**x mod p, and is part of the public information.
    if progress_func:
        progress_func('x,y\n')
    while (1):
        x=bytes_to_long(randfunc(20))
        if 0 < x < obj.q:
            break
    obj.x, obj.y=x, pow(g, x, p)

    return obj
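As a sanity check on the relations described in the comments above (p-1 a multiple of q, g of order q, y = g**x mod p), here is a hypothetical hand-sized parameter set in plain Python; generate() builds the same structure at 160-bit scale:

# Hypothetical toy-sized illustration (not produced by generate()) of the
# relations the code above establishes.
q = 11                       # stands in for the 160-bit prime q
p = 23                       # prime with p-1 == 2*q
h = 2
g = pow(h, (p - 1) // q, p)  # g = h**((p-1)/q) mod p
x = 3                        # private key, 0 < x < q
y = pow(g, x, p)             # public key
assert (p - 1) % q == 0
assert g > 1 and pow(g, q, p) == 1    # g generates the order-q subgroup
assert 0 < x < q and y == pow(g, x, p)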
# Construct a qNEW object
def construct(tuple):
    """construct(tuple:(long,long,long,long)|(long,long,long,long,long))
    Construct a qNEW object from a 4- or 5-tuple of numbers.
    """
    obj=qNEWobj()
    if len(tuple) not in [4,5]:
        raise error, 'argument for construct() wrong length'
    for i in range(len(tuple)):
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    return obj

class qNEWobj(pubkey.pubkey):
    keydata=['p', 'q', 'g', 'y', 'x']

    def _sign(self, M, K=''):
        if (self.q<=K):
            raise error, 'K is greater than q'
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M>=pow(2,161L):
            raise error, 'Illegal value of M (too large)'
        r=pow(self.g, K, self.p) % self.q
        s=(K- (r*M*self.x % self.q)) % self.q
        return (r,s)
    def _verify(self, M, sig):
        r, s = sig
        if r<=0 or r>=self.q or s<=0 or s>=self.q:
            return 0
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M<=0 or M>=pow(2,161L):
            return 0
        v1 = pow(self.g, s, self.p)
        v2 = pow(self.y, M*r, self.p)
        v = ((v1*v2) % self.p)
        v = v % self.q
        if v==r:
            return 1
        return 0

    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return 160

    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        return hasattr(self, 'x')

    def can_sign(self):
        """Return a Boolean value recording whether this algorithm can generate signatures."""
        return 1

    def can_encrypt(self):
        """Return a Boolean value recording whether this algorithm can encrypt data."""
        return 0

    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.p, self.q, self.g, self.y))

object = qNEWobj

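The algebra behind _sign() and _verify() can be traced with the same toy parameters; this hedged sketch is plain Python and does not use the qNEWobj class:

# Hypothetical toy-sized walk-through of the equations in _sign()/_verify().
p, q, g = 23, 11, 4           # domain parameters: p-1 == 2*q, g of order q mod p
x = 3                         # private key
y = pow(g, x, p)              # public key
M, K = 7, 5                   # message hash and per-signature secret, both < q

r = pow(g, K, p) % q                            # as in _sign()
s = (K - (r * M * x % q)) % q
v = (pow(g, s, p) * pow(y, M * r, p)) % p % q   # as in _verify()
assert v == r                                   # the signature verifies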
342
python/gdata/Crypto/Util/RFC1751.py
Normal file
@@ -0,0 +1,342 @@
#!/usr/local/bin/python
# rfc1751.py : Converts between 128-bit strings and a human-readable
# sequence of words, as defined in RFC1751: "A Convention for
# Human-Readable 128-bit Keys", by Daniel L. McDonald.

__revision__ = "$Id: RFC1751.py,v 1.6 2003/04/04 15:15:10 akuchling Exp $"


import string, binascii

binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101',
        6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011',
        12:'1100', 13:'1101', 14:'1110', 15:'1111'}

def _key2bin(s):
    "Convert a key into a string of binary digits"
    kl=map(lambda x: ord(x), s)
    kl=map(lambda x: binary[x/16]+binary[x&15], kl)
    return ''.join(kl)

def _extract(key, start, length):
    """Extract a bitstring from a string of binary digits, and return its
    numeric value."""
    k=key[start:start+length]
    return reduce(lambda x,y: x*2+ord(y)-48, k, 0)

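Two illustrative values for the helpers above; the import assumes the Crypto.Util.RFC1751 module added in this commit, running under Python 2:

# Assumes the Crypto.Util.RFC1751 module from this commit (Python 2 era code,
# where keys are byte strings of type str).
from Crypto.Util.RFC1751 import _key2bin, _extract

assert _key2bin('\x01\x80') == '0000000110000000'
assert _extract('0000000110000000', 7, 2) == 3    # bits 7-8 are '11'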
def key_to_english (key):
|
||||
"""key_to_english(key:string) : string
|
||||
Transform an arbitrary key into a string containing English words.
|
||||
The key length must be a multiple of 8.
|
||||
"""
|
||||
english=''
|
||||
for index in range(0, len(key), 8): # Loop over 8-byte subkeys
|
||||
subkey=key[index:index+8]
|
||||
# Compute the parity of the key
|
||||
skbin=_key2bin(subkey) ; p=0
|
||||
for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
|
||||
# Append parity bits to the subkey
|
||||
skbin=_key2bin(subkey+chr((p<<6) & 255))
|
||||
for i in range(0, 64, 11):
|
||||
english=english+wordlist[_extract(skbin, i, 11)]+' '
|
||||
|
||||
return english[:-1] # Remove the trailing space
|
||||
|
||||
def english_to_key (str):
|
||||
"""english_to_key(string):string
|
||||
Transform a string into a corresponding key.
|
||||
The string must contain words separated by whitespace; the number
|
||||
of words must be a multiple of 6.
|
||||
"""
|
||||
|
||||
L=string.split(string.upper(str)) ; key=''
|
||||
for index in range(0, len(L), 6):
|
||||
sublist=L[index:index+6] ; char=9*[0] ; bits=0
|
||||
for i in sublist:
|
||||
index = wordlist.index(i)
|
||||
shift = (8-(bits+11)%8) %8
|
||||
y = index << shift
|
||||
cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff
|
||||
if (shift>5):
|
||||
char[bits/8] = char[bits/8] | cl
|
||||
char[bits/8+1] = char[bits/8+1] | cc
|
||||
char[bits/8+2] = char[bits/8+2] | cr
|
||||
elif shift>-3:
|
||||
char[bits/8] = char[bits/8] | cc
|
||||
char[bits/8+1] = char[bits/8+1] | cr
|
||||
else: char[bits/8] = char[bits/8] | cr
|
||||
bits=bits+11
|
||||
subkey=reduce(lambda x,y:x+chr(y), char, '')
|
||||
|
||||
# Check the parity of the resulting key
|
||||
skbin=_key2bin(subkey)
|
||||
p=0
|
||||
for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
|
||||
if (p&3) != _extract(skbin, 64, 2):
|
||||
raise ValueError, "Parity error in resulting key"
|
||||
key=key+subkey[0:8]
|
||||
return key
|
||||
|
||||
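A short usage sketch of the two conversions, reusing the first test vector from this file's own __main__ block; it assumes the Crypto.Util.RFC1751 module from this commit is importable under Python 2:

# Assumes the Crypto.Util.RFC1751 module from this commit (Python 2 era code).
import binascii
from Crypto.Util import RFC1751

key = binascii.a2b_hex('EB33F77EE73D4053')        # 8-byte key
words = RFC1751.key_to_english(key)               # 'TIDE ITCH SLOW REIN RULE MOT'
assert RFC1751.english_to_key(words) == key       # round trip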
wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD",
|
||||
"AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA",
|
||||
"AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK",
|
||||
"ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE",
|
||||
"AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM",
|
||||
"BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET",
|
||||
"BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO",
|
||||
"BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT",
|
||||
"BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT",
|
||||
"CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY",
|
||||
"CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN",
|
||||
"DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG",
|
||||
"DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB",
|
||||
"DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO",
|
||||
"ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE",
|
||||
"EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW",
|
||||
"FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR",
|
||||
"FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP",
|
||||
"GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO",
|
||||
"GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD",
|
||||
"HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM",
|
||||
"HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT",
|
||||
"HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE",
|
||||
"HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL",
|
||||
"INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT",
|
||||
"ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET",
|
||||
"JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT",
|
||||
"KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB",
|
||||
"LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE",
|
||||
"LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT",
|
||||
"LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG",
|
||||
"LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW",
|
||||
"MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT",
|
||||
"MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG",
|
||||
"MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED",
|
||||
"NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD",
|
||||
"NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF",
|
||||
"OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL",
|
||||
"OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT",
|
||||
"OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD",
|
||||
"PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG",
|
||||
"PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT",
|
||||
"PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB",
|
||||
"PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT",
|
||||
"RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM",
|
||||
"RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB",
|
||||
"RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM",
|
||||
"SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET",
|
||||
"SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY",
|
||||
"SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY",
|
||||
"SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN",
|
||||
"TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE",
|
||||
"TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP",
|
||||
"TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP",
|
||||
"US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS",
|
||||
"WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT",
|
||||
"WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE",
|
||||
"YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT",
|
||||
"ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS",
|
||||
"ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE",
|
||||
"AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA",
|
||||
"ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN",
|
||||
"AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW",
|
||||
"ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA",
|
||||
"ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM",
|
||||
"AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW",
|
||||
"AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL",
|
||||
"BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM",
|
||||
"BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK",
|
||||
"BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH",
|
||||
"BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT",
|
||||
"BEAU", "BECK", "BEEF", "BEEN", "BEER",
|
||||
"BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN",
|
||||
"BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE",
|
||||
"BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE",
|
||||
"BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT",
|
||||
"BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK",
|
||||
"BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT",
|
||||
"BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK",
|
||||
"BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS",
|
||||
"BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN",
|
||||
"BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD",
|
||||
"BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG",
|
||||
"BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST",
|
||||
"BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF",
|
||||
"CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL",
|
||||
"CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL",
|
||||
"CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF",
|
||||
"CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG",
|
||||
"CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY",
|
||||
"CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA",
|
||||
"COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN",
|
||||
"COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK",
|
||||
"COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST",
|
||||
"COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB",
|
||||
"CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY",
|
||||
"CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE",
|
||||
"DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN",
|
||||
"DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS",
|
||||
"DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED",
|
||||
"DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK",
|
||||
"DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT",
|
||||
"DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES",
|
||||
"DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA",
|
||||
"DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG",
|
||||
"DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK",
|
||||
"DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK",
|
||||
"DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST",
|
||||
"EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT",
|
||||
"EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT",
|
||||
"EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED",
|
||||
"FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL",
|
||||
"FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT",
|
||||
"FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST",
|
||||
"FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE",
|
||||
"FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE",
|
||||
"FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW",
|
||||
"FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM",
|
||||
"FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL",
|
||||
"FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL",
|
||||
"FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY",
|
||||
"FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY",
|
||||
"FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA",
|
||||
"GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH",
|
||||
"GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE",
|
||||
"GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT",
|
||||
"GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN",
|
||||
"GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD",
|
||||
"GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG",
|
||||
"GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB",
|
||||
"GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN",
|
||||
"GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH",
|
||||
"GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR",
|
||||
"HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK",
|
||||
"HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE",
|
||||
"HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR",
|
||||
"HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL",
|
||||
"HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN",
|
||||
"HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT",
|
||||
"HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE",
|
||||
"HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK",
|
||||
"HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL",
|
||||
"HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK",
|
||||
"HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE",
|
||||
"HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH",
|
||||
"INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE",
|
||||
"ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE",
|
||||
"JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL",
|
||||
"JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN",
|
||||
"JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY",
|
||||
"JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST",
|
||||
"JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL",
|
||||
"KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL",
|
||||
"KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW",
|
||||
"KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD",
|
||||
"KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN",
|
||||
"LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD",
|
||||
"LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS",
|
||||
"LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER",
|
||||
"LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST",
|
||||
"LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU",
|
||||
"LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB",
|
||||
"LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST",
|
||||
"LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE",
|
||||
"LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD",
|
||||
"LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK",
|
||||
"LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE",
|
||||
"LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE",
|
||||
"MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI",
|
||||
"MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK",
|
||||
"MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE",
|
||||
"MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK",
|
||||
"MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH",
|
||||
"MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT",
|
||||
"MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS",
|
||||
"MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD",
|
||||
"MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON",
|
||||
"MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH",
|
||||
"MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK",
|
||||
"MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL",
|
||||
"NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR",
|
||||
"NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS",
|
||||
"NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA",
|
||||
"NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON",
|
||||
"NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB",
|
||||
"OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY",
|
||||
"OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE",
|
||||
"ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS",
|
||||
"OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY",
|
||||
"OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT",
|
||||
"RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE",
|
||||
"RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR",
|
||||
"RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA",
|
||||
"REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT",
|
||||
"RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD",
|
||||
"ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME",
|
||||
"ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS",
|
||||
"ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY",
|
||||
"RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE",
|
||||
"RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE",
|
||||
"SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE",
|
||||
"SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR",
|
||||
"SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK",
|
||||
"SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS",
|
||||
"SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN",
|
||||
"SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE",
|
||||
"SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE",
|
||||
"SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW",
|
||||
"SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY",
|
||||
"SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT",
|
||||
"SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB",
|
||||
"SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA",
|
||||
"SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE",
|
||||
"SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR",
|
||||
"STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH",
|
||||
"SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF",
|
||||
"SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM",
|
||||
"TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK",
|
||||
"TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM",
|
||||
"TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS",
|
||||
"TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN",
|
||||
"THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER",
|
||||
"TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY",
|
||||
"TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG",
|
||||
"TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR",
|
||||
"TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG",
|
||||
"TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE",
|
||||
"TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK",
|
||||
"TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER",
|
||||
"USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST",
|
||||
"VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY",
|
||||
"VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE",
|
||||
"WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK",
|
||||
"WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM",
|
||||
"WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY",
|
||||
"WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR",
|
||||
"WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM",
|
||||
"WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE",
|
||||
"WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE",
|
||||
"WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD",
|
||||
"WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE",
|
||||
"YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR",
|
||||
"YELL", "YOGA", "YOKE" ]
|
||||
|
||||
if __name__=='__main__':
|
||||
data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
|
||||
('CCAC2AED591056BE4F90FD441C534766',
|
||||
'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
|
||||
('EFF81F9BFBC65350920CDD7416DE8009',
|
||||
'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL')
|
||||
]
|
||||
|
||||
for key, words in data:
|
||||
print 'Trying key', key
|
||||
key=binascii.a2b_hex(key)
|
||||
w2=key_to_english(key)
|
||||
if w2!=words:
|
||||
print 'key_to_english fails on key', repr(key), ', producing', str(w2)
|
||||
k2=english_to_key(words)
|
||||
if k2!=key:
|
||||
print 'english_to_key fails on key', repr(key), ', producing', repr(k2)
|
||||
|
||||
|
||||
16
python/gdata/Crypto/Util/__init__.py
Normal file
@@ -0,0 +1,16 @@
"""Miscellaneous modules

Contains useful modules that don't belong into any of the
other Crypto.* subpackages.

Crypto.Util.number       Number-theoretic functions (primality testing, etc.)
Crypto.Util.randpool     Random number generation
Crypto.Util.RFC1751      Converts between 128-bit keys and human-readable
                         strings of words.

"""

__all__ = ['randpool', 'RFC1751', 'number']

__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:26:00 akuchling Exp $"

201
python/gdata/Crypto/Util/number.py
Normal file
@@ -0,0 +1,201 @@
|
||||
#
|
||||
# number.py : Number-theoretic functions
|
||||
#
|
||||
# Part of the Python Cryptography Toolkit
|
||||
#
|
||||
# Distribute and use freely; there are no restrictions on further
|
||||
# dissemination and usage except those imposed by the laws of your
|
||||
# country of residence. This software is provided "as is" without
|
||||
# warranty of fitness for use or suitability for any purpose, express
|
||||
# or implied. Use at your own risk or not at all.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: number.py,v 1.13 2003/04/04 18:21:07 akuchling Exp $"
|
||||
|
||||
bignum = long
|
||||
try:
|
||||
from Crypto.PublicKey import _fastmath
|
||||
except ImportError:
|
||||
_fastmath = None
|
||||
|
||||
# Commented out and replaced with faster versions below
|
||||
## def long2str(n):
|
||||
## s=''
|
||||
## while n>0:
|
||||
## s=chr(n & 255)+s
|
||||
## n=n>>8
|
||||
## return s
|
||||
|
||||
## import types
|
||||
## def str2long(s):
|
||||
## if type(s)!=types.StringType: return s # Integers will be left alone
|
||||
## return reduce(lambda x,y : x*256+ord(y), s, 0L)
|
||||

def size (N):
    """size(N:long) : int
    Returns the size of the number N in bits.
    """
    bits, power = 0,1L
    while N >= power:
        bits += 1
        power = power << 1
    return bits

def getRandomNumber(N, randfunc):
    """getRandomNumber(N:int, randfunc:callable):long
    Return an N-bit random number."""

    S = randfunc(N/8)
    odd_bits = N % 8
    if odd_bits != 0:
        char = ord(randfunc(1)) >> (8-odd_bits)
        S = chr(char) + S
    value = bytes_to_long(S)
    value |= 2L ** (N-1)                # Ensure high bit is set
    assert size(value) >= N
    return value

def GCD(x,y):
    """GCD(x:long, y:long): long
    Return the GCD of x and y.
    """
    x = abs(x) ; y = abs(y)
    while x > 0:
        x, y = y % x, x
    return y

def inverse(u, v):
    """inverse(u:long, v:long):long
    Return the inverse of u mod v.
    """
    u3, v3 = long(u), long(v)
    u1, v1 = 1L, 0L
    while v3 > 0:
        q=u3 / v3
        u1, v1 = v1, u1 - v1*q
        u3, v3 = v3, u3 - v3*q
    while u1<0:
        u1 = u1 + v
    return u1

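A quick check of the contracts of GCD() and inverse(); it assumes the Crypto.Util.number module from this commit under Python 2:

# Assumes the Crypto.Util.number module from this commit (Python 2 era code).
from Crypto.Util.number import GCD, inverse

u, v = 271, 383                      # toy values; both prime, so the GCD is 1
assert GCD(u, v) == 1
w = inverse(u, v)                    # modular inverse of u mod v
assert (u * w) % v == 1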
# Given a number of bits to generate and a random generation function,
# find a prime number of the appropriate size.

def getPrime(N, randfunc):
    """getPrime(N:int, randfunc:callable):long
    Return a random N-bit prime number.
    """

    number=getRandomNumber(N, randfunc) | 1
    while (not isPrime(number)):
        number=number+2
    return number

def isPrime(N):
    """isPrime(N:long):bool
    Return true if N is prime.
    """
    if N == 1:
        return 0
    if N in sieve:
        return 1
    for i in sieve:
        if (N % i)==0:
            return 0

    # Use the accelerator if available
    if _fastmath is not None:
        return _fastmath.isPrime(N)

    # Compute the highest bit that's set in N
    N1 = N - 1L
    n = 1L
    while (n<N):
        n=n<<1L
    n = n >> 1L

    # Rabin-Miller test
    for c in sieve[:7]:
        a=long(c) ; d=1L ; t=n
        while (t):                      # Iterate over the bits in N1
            x=(d*d) % N
            if x==1L and d!=1L and d!=N1:
                return 0                # Square root of 1 found
            if N1 & t:
                d=(x*a) % N
            else:
                d=x
            t = t >> 1L
        if d!=1L:
            return 0
    return 1

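An illustrative use of isPrime(), again assuming this module under Python 2; the test numbers below are illustrative and not part of the module's own test data:

# Assumes the Crypto.Util.number module from this commit (Python 2 era code).
from Crypto.Util.number import isPrime

assert isPrime(2**89 - 1)          # a Mersenne prime, passes the Rabin-Miller test
assert not isPrime(2**89 + 1)      # divisible by 3, rejected by the sieve check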
# Small primes used for checking primality; these are all the primes
|
||||
# less than 256. This should be enough to eliminate most of the odd
|
||||
# numbers before needing to do a Rabin-Miller test at all.
|
||||
|
||||
sieve=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
|
||||
61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
|
||||
131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
|
||||
197, 199, 211, 223, 227, 229, 233, 239, 241, 251]
|
||||
|
||||
# Improved conversion functions contributed by Barry Warsaw, after
|
||||
# careful benchmarking
|
||||
|
||||
import struct
|
||||
|
||||
def long_to_bytes(n, blocksize=0):
|
||||
"""long_to_bytes(n:long, blocksize:int) : string
|
||||
Convert a long integer to a byte string.
|
||||
|
||||
If optional blocksize is given and greater than zero, pad the front of the
|
||||
byte string with binary zeros so that the length is a multiple of
|
||||
blocksize.
|
||||
"""
|
||||
# after much testing, this algorithm was deemed to be the fastest
|
||||
s = ''
|
||||
n = long(n)
|
||||
pack = struct.pack
|
||||
while n > 0:
|
||||
s = pack('>I', n & 0xffffffffL) + s
|
||||
n = n >> 32
|
||||
# strip off leading zeros
|
||||
for i in range(len(s)):
|
||||
if s[i] != '\000':
|
||||
break
|
||||
else:
|
||||
# only happens when n == 0
|
||||
s = '\000'
|
||||
i = 0
|
||||
s = s[i:]
|
||||
# add back some pad bytes. this could be done more efficiently w.r.t. the
|
||||
# de-padding being done above, but sigh...
|
||||
if blocksize > 0 and len(s) % blocksize:
|
||||
s = (blocksize - len(s) % blocksize) * '\000' + s
|
||||
return s
|
||||
|
||||
def bytes_to_long(s):
|
||||
"""bytes_to_long(string) : long
|
||||
Convert a byte string to a long integer.
|
||||
|
||||
This is (essentially) the inverse of long_to_bytes().
|
||||
"""
|
||||
acc = 0L
|
||||
unpack = struct.unpack
|
||||
length = len(s)
|
||||
if length % 4:
|
||||
extra = (4 - length % 4)
|
||||
s = '\000' * extra + s
|
||||
length = length + extra
|
||||
for i in range(0, length, 4):
|
||||
acc = (acc << 32) + unpack('>I', s[i:i+4])[0]
|
||||
return acc
|
||||
|
||||
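A round-trip sketch for the two conversion functions above, assuming the Crypto.Util.number module from this commit under Python 2:

# Assumes the Crypto.Util.number module from this commit (Python 2 era code).
from Crypto.Util.number import long_to_bytes, bytes_to_long

n = 0x1234567890ABCDEF
s = long_to_bytes(n)                       # 8 bytes, big-endian
assert bytes_to_long(s) == n               # round trip
assert len(long_to_bytes(n, 16)) == 16     # blocksize pads the front with zero bytes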
# For backwards compatibility...
|
||||
import warnings
|
||||
def long2str(n, blocksize=0):
|
||||
warnings.warn("long2str() has been replaced by long_to_bytes()")
|
||||
return long_to_bytes(n, blocksize)
|
||||
def str2long(s):
|
||||
warnings.warn("str2long() has been replaced by bytes_to_long()")
|
||||
return bytes_to_long(s)
|
||||
421
python/gdata/Crypto/Util/randpool.py
Normal file
@@ -0,0 +1,421 @@
|
||||
#
|
||||
# randpool.py : Cryptographically strong random number generation
|
||||
#
|
||||
# Part of the Python Cryptography Toolkit
|
||||
#
|
||||
# Distribute and use freely; there are no restrictions on further
|
||||
# dissemination and usage except those imposed by the laws of your
|
||||
# country of residence. This software is provided "as is" without
|
||||
# warranty of fitness for use or suitability for any purpose, express
|
||||
# or implied. Use at your own risk or not at all.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: randpool.py,v 1.14 2004/05/06 12:56:54 akuchling Exp $"
|
||||
|
||||
import time, array, types, warnings, os.path
|
||||
from Crypto.Util.number import long_to_bytes
|
||||
try:
|
||||
import Crypto.Util.winrandom as winrandom
|
||||
except:
|
||||
winrandom = None
|
||||
|
||||
STIRNUM = 3
|
||||
|
||||
class RandomPool:
|
||||
"""randpool.py : Cryptographically strong random number generation.
|
||||
|
||||
The implementation here is similar to the one in PGP. To be
|
||||
cryptographically strong, it must be difficult to determine the RNG's
|
||||
output, whether in the future or the past. This is done by using
|
||||
a cryptographic hash function to "stir" the random data.
|
||||
|
||||
Entropy is gathered in the same fashion as PGP; the highest-resolution
|
||||
clock around is read and the data is added to the random number pool.
|
||||
A conservative estimate of the entropy is then kept.
|
||||
|
||||
If a cryptographically secure random source is available (/dev/urandom
|
||||
on many Unixes, Windows CryptGenRandom on most Windows), then use
|
||||
it.
|
||||
|
||||
Instance Attributes:
|
||||
bits : int
|
||||
Maximum size of pool in bits
|
||||
bytes : int
|
||||
Maximum size of pool in bytes
|
||||
entropy : int
|
||||
Number of bits of entropy in this pool.
|
||||
|
||||
Methods:
|
||||
add_event([s]) : add some entropy to the pool
|
||||
get_bytes(int) : get N bytes of random data
|
||||
randomize([N]) : get N bytes of randomness from external source
|
||||
"""
|
||||
|
||||
|
||||
def __init__(self, numbytes = 160, cipher=None, hash=None):
|
||||
if hash is None:
|
||||
from Crypto.Hash import SHA as hash
|
||||
|
||||
# The cipher argument is vestigial; it was removed from
|
||||
# version 1.1 so RandomPool would work even in the limited
|
||||
# exportable subset of the code
|
||||
if cipher is not None:
|
||||
warnings.warn("'cipher' parameter is no longer used")
|
||||
|
||||
if isinstance(hash, types.StringType):
|
||||
# ugly hack to force __import__ to give us the end-path module
|
||||
hash = __import__('Crypto.Hash.'+hash,
|
||||
None, None, ['new'])
|
||||
warnings.warn("'hash' parameter should now be a hashing module")
|
||||
|
||||
self.bytes = numbytes
|
||||
self.bits = self.bytes*8
|
||||
self.entropy = 0
|
||||
self._hash = hash
|
||||
|
||||
# Construct an array to hold the random pool,
|
||||
# initializing it to 0.
|
||||
self._randpool = array.array('B', [0]*self.bytes)
|
||||
|
||||
self._event1 = self._event2 = 0
|
||||
self._addPos = 0
|
||||
self._getPos = hash.digest_size
|
||||
self._lastcounter=time.time()
|
||||
self.__counter = 0
|
||||
|
||||
self._measureTickSize() # Estimate timer resolution
|
||||
self._randomize()
|
||||
|
||||
def _updateEntropyEstimate(self, nbits):
|
||||
self.entropy += nbits
|
||||
if self.entropy < 0:
|
||||
self.entropy = 0
|
||||
elif self.entropy > self.bits:
|
||||
self.entropy = self.bits
|
||||
|
||||
def _randomize(self, N = 0, devname = '/dev/urandom'):
|
||||
"""_randomize(N, DEVNAME:device-filepath)
|
||||
collects N bits of randomness from some entropy source (e.g.,
|
||||
/dev/urandom on Unixes that have it, Windows CryptoAPI
|
||||
CryptGenRandom, etc)
|
||||
DEVNAME is optional, defaults to /dev/urandom. You can change it
|
||||
to /dev/random if you want to block till you get enough
|
||||
entropy.
|
||||
"""
|
||||
data = ''
|
||||
if N <= 0:
|
||||
nbytes = int((self.bits - self.entropy)/8+0.5)
|
||||
else:
|
||||
nbytes = int(N/8+0.5)
|
||||
if winrandom:
|
||||
# Windows CryptGenRandom provides random data.
|
||||
data = winrandom.new().get_bytes(nbytes)
|
||||
elif os.path.exists(devname):
|
||||
# Many OSes support a /dev/urandom device
|
||||
try:
|
||||
f=open(devname)
|
||||
data=f.read(nbytes)
|
||||
f.close()
|
||||
except IOError, (num, msg):
|
||||
if num!=2: raise IOError, (num, msg)
|
||||
# If the file wasn't found, ignore the error
|
||||
if data:
|
||||
self._addBytes(data)
|
||||
# Entropy estimate: The number of bits of
|
||||
# data obtained from the random source.
|
||||
self._updateEntropyEstimate(8*len(data))
|
||||
self.stir_n() # Wash the random pool
|
||||
|
||||
def randomize(self, N=0):
|
||||
"""randomize(N:int)
|
||||
use the class entropy source to get some entropy data.
|
||||
This is overridden by KeyboardRandomize().
|
||||
"""
|
||||
return self._randomize(N)
|
||||
|
||||
def stir_n(self, N = STIRNUM):
|
||||
"""stir_n(N)
|
||||
stirs the random pool N times
|
||||
"""
|
||||
for i in xrange(N):
|
||||
self.stir()
|
||||
|
||||
def stir (self, s = ''):
|
||||
"""stir(s:string)
|
||||
Mix up the randomness pool. This will call add_event() twice,
|
||||
but out of paranoia the entropy attribute will not be
|
||||
increased. The optional 's' parameter is a string that will
|
||||
be hashed with the randomness pool.
|
||||
"""
|
||||
|
||||
entropy=self.entropy # Save inital entropy value
|
||||
self.add_event()
|
||||
|
||||
# Loop over the randomness pool: hash its contents
|
||||
# along with a counter, and add the resulting digest
|
||||
# back into the pool.
|
||||
for i in range(self.bytes / self._hash.digest_size):
|
||||
h = self._hash.new(self._randpool)
|
||||
h.update(str(self.__counter) + str(i) + str(self._addPos) + s)
|
||||
self._addBytes( h.digest() )
|
||||
self.__counter = (self.__counter + 1) & 0xFFFFffffL
|
||||
|
||||
self._addPos, self._getPos = 0, self._hash.digest_size
|
||||
self.add_event()
|
||||
|
||||
# Restore the old value of the entropy.
|
||||
self.entropy=entropy
|
||||
|
||||
|
||||
def get_bytes (self, N):
|
||||
"""get_bytes(N:int) : string
|
||||
Return N bytes of random data.
|
||||
"""
|
||||
|
||||
s=''
|
||||
i, pool = self._getPos, self._randpool
|
||||
h=self._hash.new()
|
||||
dsize = self._hash.digest_size
|
||||
num = N
|
||||
while num > 0:
|
||||
h.update( self._randpool[i:i+dsize] )
|
||||
s = s + h.digest()
|
||||
num = num - dsize
|
||||
i = (i + dsize) % self.bytes
|
||||
if i<dsize:
|
||||
self.stir()
|
||||
i=self._getPos
|
||||
|
||||
self._getPos = i
|
||||
self._updateEntropyEstimate(- 8*N)
|
||||
return s[:N]
|
||||
|
||||
|
||||
def add_event(self, s=''):
|
||||
"""add_event(s:string)
|
||||
Add an event to the random pool. The current time is stored
|
||||
between calls and used to estimate the entropy. The optional
|
||||
's' parameter is a string that will also be XORed into the pool.
|
||||
Returns the estimated number of additional bits of entropy gain.
|
||||
"""
|
||||
event = time.time()*1000
|
||||
delta = self._noise()
|
||||
s = (s + long_to_bytes(event) +
|
||||
4*chr(0xaa) + long_to_bytes(delta) )
|
||||
self._addBytes(s)
|
||||
if event==self._event1 and event==self._event2:
|
||||
# If events are coming too closely together, assume there's
|
||||
# no effective entropy being added.
|
||||
bits=0
|
||||
else:
|
||||
# Count the number of bits in delta, and assume that's the entropy.
|
||||
bits=0
|
||||
while delta:
|
||||
delta, bits = delta>>1, bits+1
|
||||
if bits>8: bits=8
|
||||
|
||||
self._event1, self._event2 = event, self._event1
|
||||
|
||||
self._updateEntropyEstimate(bits)
|
||||
return bits
|
||||
|
||||
# Private functions
|
||||
def _noise(self):
|
||||
# Adds a bit of noise to the random pool, by adding in the
|
||||
# current time and CPU usage of this process.
|
||||
# The difference from the previous call to _noise() is taken
|
||||
# in an effort to estimate the entropy.
|
||||
t=time.time()
|
||||
delta = (t - self._lastcounter)/self._ticksize*1e6
|
||||
self._lastcounter = t
|
||||
self._addBytes(long_to_bytes(long(1000*time.time())))
|
||||
self._addBytes(long_to_bytes(long(1000*time.clock())))
|
||||
self._addBytes(long_to_bytes(long(1000*time.time())))
|
||||
self._addBytes(long_to_bytes(long(delta)))
|
||||
|
||||
# Reduce delta to a maximum of 8 bits so we don't add too much
|
||||
# entropy as a result of this call.
|
||||
delta=delta % 0xff
|
||||
return int(delta)
|
||||
|
||||
|
||||
def _measureTickSize(self):
|
||||
# _measureTickSize() tries to estimate a rough average of the
|
||||
# resolution of time that you can see from Python. It does
|
||||
# this by measuring the time 100 times, computing the delay
|
||||
# between measurements, and taking the median of the resulting
|
||||
# list. (We also hash all the times and add them to the pool)
|
||||
interval = [None] * 100
|
||||
h = self._hash.new(`(id(self),id(interval))`)
|
||||
|
||||
# Compute 100 differences
|
||||
t=time.time()
|
||||
h.update(`t`)
|
||||
i = 0
|
||||
j = 0
|
||||
while i < 100:
|
||||
t2=time.time()
|
||||
h.update(`(i,j,t2)`)
|
||||
j += 1
|
||||
delta=int((t2-t)*1e6)
|
||||
if delta:
|
||||
interval[i] = delta
|
||||
i += 1
|
||||
t=t2
|
||||
|
||||
# Take the median of the array of intervals
|
||||
interval.sort()
|
||||
self._ticksize=interval[len(interval)/2]
|
||||
h.update(`(interval,self._ticksize)`)
|
||||
# mix in the measurement times and wash the random pool
|
||||
self.stir(h.digest())
|
||||
|
||||
def _addBytes(self, s):
|
||||
"XOR the contents of the string S into the random pool"
|
||||
i, pool = self._addPos, self._randpool
|
||||
for j in range(0, len(s)):
|
||||
pool[i]=pool[i] ^ ord(s[j])
|
||||
i=(i+1) % self.bytes
|
||||
self._addPos = i
|
||||
|
||||
# Deprecated method names: remove in PCT 2.1 or later.
|
||||
def getBytes(self, N):
|
||||
warnings.warn("getBytes() method replaced by get_bytes()",
|
||||
DeprecationWarning)
|
||||
return self.get_bytes(N)
|
||||
|
||||
def addEvent (self, event, s=""):
|
||||
warnings.warn("addEvent() method replaced by add_event()",
|
||||
DeprecationWarning)
|
||||
return self.add_event(s + str(event))
|
||||
|
||||
class PersistentRandomPool (RandomPool):
|
||||
def __init__ (self, filename=None, *args, **kwargs):
|
||||
RandomPool.__init__(self, *args, **kwargs)
|
||||
self.filename = filename
|
||||
if filename:
|
||||
try:
|
||||
# the time taken to open and read the file might have
|
||||
# a little disk variability, modulo disk/kernel caching...
|
||||
f=open(filename, 'rb')
|
||||
self.add_event()
|
||||
data = f.read()
|
||||
self.add_event()
|
||||
# mix in the data from the file and wash the random pool
|
||||
self.stir(data)
|
||||
f.close()
|
||||
except IOError:
|
||||
# Oh, well; the file doesn't exist or is unreadable, so
|
||||
# we'll just ignore it.
|
||||
pass
|
||||
|
||||
def save(self):
|
||||
if self.filename == "":
|
||||
raise ValueError, "No filename set for this object"
|
||||
# wash the random pool before save, provides some forward secrecy for
|
||||
# old values of the pool.
|
||||
self.stir_n()
|
||||
f=open(self.filename, 'wb')
|
||||
self.add_event()
|
||||
f.write(self._randpool.tostring())
|
||||
f.close()
|
||||
self.add_event()
|
||||
# wash the pool again, provide some protection for future values
|
||||
self.stir()
|
||||
|
||||
# non-echoing Windows keyboard entry
|
||||
_kb = 0
|
||||
if not _kb:
|
||||
try:
|
||||
import msvcrt
|
||||
class KeyboardEntry:
|
||||
def getch(self):
|
||||
c = msvcrt.getch()
|
||||
if c in ('\000', '\xe0'):
|
||||
# function key
|
||||
c += msvcrt.getch()
|
||||
return c
|
||||
def close(self, delay = 0):
|
||||
if delay:
|
||||
time.sleep(delay)
|
||||
while msvcrt.kbhit():
|
||||
msvcrt.getch()
|
||||
_kb = 1
|
||||
except:
|
||||
pass
|
||||
|
||||
# non-echoing Posix keyboard entry
|
||||
if not _kb:
|
||||
try:
|
||||
import termios
|
||||
class KeyboardEntry:
|
||||
def __init__(self, fd = 0):
|
||||
self._fd = fd
|
||||
self._old = termios.tcgetattr(fd)
|
||||
new = termios.tcgetattr(fd)
|
||||
new[3]=new[3] & ~termios.ICANON & ~termios.ECHO
|
||||
termios.tcsetattr(fd, termios.TCSANOW, new)
|
||||
def getch(self):
|
||||
termios.tcflush(0, termios.TCIFLUSH) # XXX Leave this in?
|
||||
return os.read(self._fd, 1)
|
||||
def close(self, delay = 0):
|
||||
if delay:
|
||||
time.sleep(delay)
|
||||
termios.tcflush(self._fd, termios.TCIFLUSH)
|
||||
termios.tcsetattr(self._fd, termios.TCSAFLUSH, self._old)
|
||||
_kb = 1
|
||||
except:
|
||||
pass
|
||||
|
||||
class KeyboardRandomPool (PersistentRandomPool):
|
||||
def __init__(self, *args, **kwargs):
|
||||
PersistentRandomPool.__init__(self, *args, **kwargs)
|
||||
|
||||
def randomize(self, N = 0):
|
||||
"Adds N bits of entropy to random pool. If N is 0, fill up pool."
|
||||
import os, string, time
|
||||
if N <= 0:
|
||||
bits = self.bits - self.entropy
|
||||
else:
|
||||
bits = N*8
|
||||
if bits == 0:
|
||||
return
|
||||
print bits,'bits of entropy are now required. Please type on the keyboard'
|
||||
print 'until enough randomness has been accumulated.'
|
||||
kb = KeyboardEntry()
|
||||
s='' # We'll save the characters typed and add them to the pool.
|
||||
hash = self._hash
|
||||
e = 0
|
||||
try:
|
||||
while e < bits:
|
||||
temp=str(bits-e).rjust(6)
|
||||
os.write(1, temp)
|
||||
s=s+kb.getch()
|
||||
e += self.add_event(s)
|
||||
os.write(1, 6*chr(8))
|
||||
self.add_event(s+hash.new(s).digest() )
|
||||
finally:
|
||||
kb.close()
|
||||
print '\n\007 Enough. Please wait a moment.\n'
|
||||
self.stir_n() # wash the random pool.
|
||||
kb.close(4)
|
||||
|
||||
if __name__ == '__main__':
|
||||
pool = RandomPool()
|
||||
print 'random pool entropy', pool.entropy, 'bits'
|
||||
pool.add_event('something')
|
||||
print `pool.get_bytes(100)`
|
||||
import tempfile, os
|
||||
fname = tempfile.mktemp()
|
||||
pool = KeyboardRandomPool(filename=fname)
|
||||
print 'keyboard random pool entropy', pool.entropy, 'bits'
|
||||
pool.randomize()
|
||||
print 'keyboard random pool entropy', pool.entropy, 'bits'
|
||||
pool.randomize(128)
|
||||
pool.save()
|
||||
saved = open(fname, 'rb').read()
|
||||
print 'saved', `saved`
|
||||
print 'pool ', `pool._randpool.tostring()`
|
||||
newpool = PersistentRandomPool(fname)
|
||||
print 'persistent random pool entropy', pool.entropy, 'bits'
|
||||
os.remove(fname)
|
||||
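A minimal usage sketch matching the RandomPool class docstring above, assuming the Crypto.Util.randpool module from this commit under Python 2:

# Assumes the Crypto.Util.randpool module from this commit (Python 2 era code).
from Crypto.Util.randpool import RandomPool

pool = RandomPool()                   # seeds itself from /dev/urandom or CryptGenRandom
pool.add_event('some event data')     # mix caller-supplied entropy into the pool
key = pool.get_bytes(16)              # draw 16 random bytes (lowers the entropy estimate)
assert len(key) == 16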
453
python/gdata/Crypto/Util/test.py
Normal file
@@ -0,0 +1,453 @@
|
||||
#
|
||||
# test.py : Functions used for testing the modules
|
||||
#
|
||||
# Part of the Python Cryptography Toolkit
|
||||
#
|
||||
# Distribute and use freely; there are no restrictions on further
|
||||
# dissemination and usage except those imposed by the laws of your
|
||||
# country of residence. This software is provided "as is" without
|
||||
# warranty of fitness for use or suitability for any purpose, express
|
||||
# or implied. Use at your own risk or not at all.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: test.py,v 1.16 2004/08/13 22:24:18 akuchling Exp $"
|
||||
|
||||
import binascii
|
||||
import string
|
||||
import testdata
|
||||
|
||||
from Crypto.Cipher import *
|
||||
|
||||
def die(string):
|
||||
import sys
|
||||
print '***ERROR: ', string
|
||||
# sys.exit(0) # Will default to continuing onward...
|
||||
|
||||
def print_timing (size, delta, verbose):
|
||||
if verbose:
|
||||
if delta == 0:
|
||||
print 'Unable to measure time -- elapsed time too small'
|
||||
else:
|
||||
print '%.2f K/sec' % (size/delta)
|
||||
|
||||
def exerciseBlockCipher(cipher, verbose):
|
||||
import string, time
|
||||
try:
|
||||
ciph = eval(cipher)
|
||||
except NameError:
|
||||
print cipher, 'module not available'
|
||||
return None
|
||||
print cipher+ ':'
|
||||
str='1' # Build 128K of test data
|
||||
for i in xrange(0, 17):
|
||||
str=str+str
|
||||
if ciph.key_size==0: ciph.key_size=16
|
||||
password = 'password12345678Extra text for password'[0:ciph.key_size]
|
||||
IV = 'Test IV Test IV Test IV Test'[0:ciph.block_size]
|
||||
|
||||
if verbose: print ' ECB mode:',
|
||||
obj=ciph.new(password, ciph.MODE_ECB)
|
||||
if obj.block_size != ciph.block_size:
|
||||
die("Module and cipher object block_size don't match")
|
||||
|
||||
text='1234567812345678'[0:ciph.block_size]
|
||||
c=obj.encrypt(text)
|
||||
if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"')
|
||||
text='KuchlingKuchling'[0:ciph.block_size]
|
||||
c=obj.encrypt(text)
|
||||
if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"')
|
||||
text='NotTodayNotEver!'[0:ciph.block_size]
|
||||
c=obj.encrypt(text)
|
||||
if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"')
|
||||
|
||||
start=time.time()
|
||||
s=obj.encrypt(str)
|
||||
s2=obj.decrypt(s)
|
||||
end=time.time()
|
||||
if (str!=s2):
|
||||
die('Error in resulting plaintext from ECB mode')
|
||||
print_timing(256, end-start, verbose)
|
||||
del obj
|
||||
|
||||
if verbose: print ' CFB mode:',
|
||||
obj1=ciph.new(password, ciph.MODE_CFB, IV)
|
||||
obj2=ciph.new(password, ciph.MODE_CFB, IV)
|
||||
start=time.time()
|
||||
ciphertext=obj1.encrypt(str[0:65536])
|
||||
plaintext=obj2.decrypt(ciphertext)
|
||||
end=time.time()
|
||||
if (plaintext!=str[0:65536]):
|
||||
die('Error in resulting plaintext from CFB mode')
|
||||
print_timing(64, end-start, verbose)
|
||||
del obj1, obj2
|
||||
|
||||
if verbose: print ' CBC mode:',
|
||||
obj1=ciph.new(password, ciph.MODE_CBC, IV)
|
||||
obj2=ciph.new(password, ciph.MODE_CBC, IV)
|
||||
start=time.time()
|
||||
ciphertext=obj1.encrypt(str)
|
||||
plaintext=obj2.decrypt(ciphertext)
|
||||
end=time.time()
|
||||
if (plaintext!=str):
|
||||
die('Error in resulting plaintext from CBC mode')
|
||||
print_timing(256, end-start, verbose)
|
||||
del obj1, obj2
|
||||
|
||||
if verbose: print ' PGP mode:',
|
||||
obj1=ciph.new(password, ciph.MODE_PGP, IV)
|
||||
obj2=ciph.new(password, ciph.MODE_PGP, IV)
|
||||
start=time.time()
|
||||
ciphertext=obj1.encrypt(str)
|
||||
plaintext=obj2.decrypt(ciphertext)
|
||||
end=time.time()
|
||||
if (plaintext!=str):
|
||||
die('Error in resulting plaintext from PGP mode')
|
||||
print_timing(256, end-start, verbose)
|
||||
del obj1, obj2
|
||||
|
||||
if verbose: print ' OFB mode:',
|
||||
obj1=ciph.new(password, ciph.MODE_OFB, IV)
|
||||
obj2=ciph.new(password, ciph.MODE_OFB, IV)
|
||||
start=time.time()
|
||||
ciphertext=obj1.encrypt(str)
|
||||
plaintext=obj2.decrypt(ciphertext)
|
||||
end=time.time()
|
||||
if (plaintext!=str):
|
||||
die('Error in resulting plaintext from OFB mode')
|
||||
print_timing(256, end-start, verbose)
|
||||
del obj1, obj2
|
||||
|
||||
def counter(length=ciph.block_size):
|
||||
return length * 'a'
|
||||
|
||||
if verbose: print ' CTR mode:',
|
||||
obj1=ciph.new(password, ciph.MODE_CTR, counter=counter)
|
||||
obj2=ciph.new(password, ciph.MODE_CTR, counter=counter)
|
||||
start=time.time()
|
||||
ciphertext=obj1.encrypt(str)
|
||||
plaintext=obj2.decrypt(ciphertext)
|
||||
end=time.time()
|
||||
if (plaintext!=str):
|
||||
die('Error in resulting plaintext from CTR mode')
|
||||
print_timing(256, end-start, verbose)
|
||||
del obj1, obj2
|
||||
|
||||
# Test the IV handling
|
||||
if verbose: print ' Testing IV handling'
|
||||
obj1=ciph.new(password, ciph.MODE_CBC, IV)
|
||||
plaintext='Test'*(ciph.block_size/4)*3
|
||||
ciphertext1=obj1.encrypt(plaintext)
|
||||
obj1.IV=IV
|
||||
ciphertext2=obj1.encrypt(plaintext)
|
||||
if ciphertext1!=ciphertext2:
|
||||
die('Error in setting IV')
|
||||
|
||||
# Test keyword arguments
|
||||
obj1=ciph.new(key=password)
|
||||
obj1=ciph.new(password, mode=ciph.MODE_CBC)
|
||||
obj1=ciph.new(mode=ciph.MODE_CBC, key=password)
|
||||
obj1=ciph.new(IV=IV, mode=ciph.MODE_CBC, key=password)
|
||||
|
||||
return ciph
|
||||
|
||||
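The mode tests above all reduce to an encrypt/decrypt round trip; here is a stand-alone, hedged version of the ECB case using the AES module shipped in this same commit (the 16-byte key and block sizes are assumptions of this sketch):

# Stand-alone version of the ECB round-trip check above, using the AES module
# from this commit (Python 2 era API; 16-byte key and block assumed).
from Crypto.Cipher import AES

key = 'password12345678'              # 16-byte key, as in exerciseBlockCipher
obj = AES.new(key, AES.MODE_ECB)
text = '1234567812345678'             # one 16-byte block
assert obj.decrypt(obj.encrypt(text)) == text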
def exerciseStreamCipher(cipher, verbose):
|
||||
import string, time
|
||||
try:
|
||||
ciph = eval(cipher)
|
||||
except (NameError):
|
||||
print cipher, 'module not available'
|
||||
return None
|
||||
print cipher + ':',
|
||||
str='1' # Build 128K of test data
|
||||
for i in xrange(0, 17):
|
||||
str=str+str
|
||||
key_size = ciph.key_size or 16
|
||||
password = 'password12345678Extra text for password'[0:key_size]
|
||||
|
||||
obj1=ciph.new(password)
|
||||
obj2=ciph.new(password)
|
||||
if obj1.block_size != ciph.block_size:
|
||||
die("Module and cipher object block_size don't match")
|
||||
if obj1.key_size != ciph.key_size:
|
||||
die("Module and cipher object key_size don't match")
|
||||
|
||||
text='1234567812345678Python'
|
||||
c=obj1.encrypt(text)
|
||||
if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"')
|
||||
text='B1FF I2 A R3A11Y |<00L D00D!!!!!'
|
||||
c=obj1.encrypt(text)
|
||||
if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"')
|
||||
text='SpamSpamSpamSpamSpamSpamSpamSpamSpam'
|
||||
c=obj1.encrypt(text)
|
||||
if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"')
|
||||
|
||||
start=time.time()
|
||||
s=obj1.encrypt(str)
|
||||
str=obj2.decrypt(s)
|
||||
end=time.time()
|
||||
print_timing(256, end-start, verbose)
|
||||
del obj1, obj2
|
||||
|
||||
return ciph
|
||||
|
||||
def TestStreamModules(args=['arc4', 'XOR'], verbose=1):
|
||||
import sys, string
|
||||
args=map(string.lower, args)
|
||||
|
||||
if 'arc4' in args:
|
||||
# Test ARC4 stream cipher
|
||||
arc4=exerciseStreamCipher('ARC4', verbose)
|
||||
if (arc4!=None):
|
||||
for entry in testdata.arc4:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=arc4.new(key)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('ARC4 failed on entry '+`entry`)
|
||||
|
||||
if 'xor' in args:
|
||||
# Test XOR stream cipher
|
||||
XOR=exerciseStreamCipher('XOR', verbose)
|
||||
if (XOR!=None):
|
||||
for entry in testdata.xor:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=XOR.new(key)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('XOR failed on entry '+`entry`)
|
||||
|
||||
|
||||
def TestBlockModules(args=['aes', 'arc2', 'des', 'blowfish', 'cast', 'des3',
|
||||
'idea', 'rc5'],
|
||||
verbose=1):
|
||||
import string
|
||||
args=map(string.lower, args)
|
||||
if 'aes' in args:
|
||||
ciph=exerciseBlockCipher('AES', verbose) # AES
|
||||
if (ciph!=None):
|
||||
if verbose: print ' Verifying against test suite...'
|
||||
for entry in testdata.aes:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=ciph.new(key, ciph.MODE_ECB)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('AES failed on entry '+`entry`)
|
||||
for i in ciphertext:
|
||||
if verbose: print hex(ord(i)),
|
||||
if verbose: print
|
||||
|
||||
for entry in testdata.aes_modes:
|
||||
mode, key, plain, cipher, kw = entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=ciph.new(key, mode, **kw)
|
||||
obj2=ciph.new(key, mode, **kw)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('AES encrypt failed on entry '+`entry`)
|
||||
for i in ciphertext:
|
||||
if verbose: print hex(ord(i)),
|
||||
if verbose: print
|
||||
|
||||
plain2=obj2.decrypt(ciphertext)
|
||||
if plain2!=plain:
|
||||
die('AES decrypt failed on entry '+`entry`)
|
||||
for i in plain2:
|
||||
if verbose: print hex(ord(i)),
|
||||
if verbose: print
|
||||
|
||||
|
||||
if 'arc2' in args:
|
||||
ciph=exerciseBlockCipher('ARC2', verbose) # Alleged RC2
|
||||
if (ciph!=None):
|
||||
if verbose: print ' Verifying against test suite...'
|
||||
for entry in testdata.arc2:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=ciph.new(key, ciph.MODE_ECB)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('ARC2 failed on entry '+`entry`)
|
||||
for i in ciphertext:
|
||||
if verbose: print hex(ord(i)),
|
||||
print
|
||||
|
||||
if 'blowfish' in args:
|
||||
ciph=exerciseBlockCipher('Blowfish',verbose)# Bruce Schneier's Blowfish cipher
|
||||
if (ciph!=None):
|
||||
if verbose: print ' Verifying against test suite...'
|
||||
for entry in testdata.blowfish:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=ciph.new(key, ciph.MODE_ECB)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('Blowfish failed on entry '+`entry`)
|
||||
for i in ciphertext:
|
||||
if verbose: print hex(ord(i)),
|
||||
if verbose: print
|
||||
|
||||
if 'cast' in args:
|
||||
ciph=exerciseBlockCipher('CAST', verbose) # CAST-128
|
||||
if (ciph!=None):
|
||||
if verbose: print ' Verifying against test suite...'
|
||||
for entry in testdata.cast:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=ciph.new(key, ciph.MODE_ECB)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('CAST failed on entry '+`entry`)
|
||||
for i in ciphertext:
|
||||
if verbose: print hex(ord(i)),
|
||||
if verbose: print
|
||||
|
||||
if 0:
|
||||
# The full-maintenance test; it requires 4 million encryptions,
|
||||
# and correspondingly is quite time-consuming. I've disabled
|
||||
# it; it's faster to compile block/cast.c with -DTEST and run
|
||||
# the resulting program.
|
||||
a = b = '\x01\x23\x45\x67\x12\x34\x56\x78\x23\x45\x67\x89\x34\x56\x78\x9A'
|
||||
|
||||
for i in range(0, 1000000):
|
||||
obj = cast.new(b, cast.MODE_ECB)
|
||||
a = obj.encrypt(a[:8]) + obj.encrypt(a[-8:])
|
||||
obj = cast.new(a, cast.MODE_ECB)
|
||||
b = obj.encrypt(b[:8]) + obj.encrypt(b[-8:])
|
||||
|
||||
if a!="\xEE\xA9\xD0\xA2\x49\xFD\x3B\xA6\xB3\x43\x6F\xB8\x9D\x6D\xCA\x92":
|
||||
if verbose: print 'CAST test failed: value of "a" doesn\'t match'
|
||||
if b!="\xB2\xC9\x5E\xB0\x0C\x31\xAD\x71\x80\xAC\x05\xB8\xE8\x3D\x69\x6E":
|
||||
if verbose: print 'CAST test failed: value of "b" doesn\'t match'
|
||||
|
||||
if 'des' in args:
|
||||
# Test/benchmark DES block cipher
|
||||
des=exerciseBlockCipher('DES', verbose)
|
||||
if (des!=None):
|
||||
# Various tests taken from the DES library packaged with Kerberos V4
|
||||
obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_ECB)
|
||||
s=obj.encrypt('Now is t')
|
||||
if (s!=binascii.a2b_hex('3fa40e8a984d4815')):
|
||||
die('DES fails test 1')
|
||||
obj=des.new(binascii.a2b_hex('08192a3b4c5d6e7f'), des.MODE_ECB)
|
||||
s=obj.encrypt('\000\000\000\000\000\000\000\000')
|
||||
if (s!=binascii.a2b_hex('25ddac3e96176467')):
|
||||
die('DES fails test 2')
|
||||
obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC,
|
||||
binascii.a2b_hex('1234567890abcdef'))
|
||||
s=obj.encrypt("Now is the time for all ")
|
||||
if (s!=binascii.a2b_hex('e5c7cdde872bf27c43e934008c389c0f683788499a7c05f6')):
|
||||
die('DES fails test 3')
|
||||
obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC,
|
||||
binascii.a2b_hex('fedcba9876543210'))
|
||||
s=obj.encrypt("7654321 Now is the time for \000\000\000\000")
|
||||
if (s!=binascii.a2b_hex("ccd173ffab2039f4acd8aefddfd8a1eb468e91157888ba681d269397f7fe62b4")):
|
||||
die('DES fails test 4')
|
||||
del obj,s
|
||||
|
||||
# R. Rivest's test: see http://theory.lcs.mit.edu/~rivest/destest.txt
|
||||
x=binascii.a2b_hex('9474B8E8C73BCA7D')
|
||||
for i in range(0, 16):
|
||||
obj=des.new(x, des.MODE_ECB)
|
||||
if (i & 1): x=obj.decrypt(x)
|
||||
else: x=obj.encrypt(x)
|
||||
if x!=binascii.a2b_hex('1B1A2DDB4C642438'):
|
||||
die("DES fails Rivest's test")
|
||||
|
||||
if verbose: print ' Verifying against test suite...'
|
||||
for entry in testdata.des:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=des.new(key, des.MODE_ECB)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('DES failed on entry '+`entry`)
|
||||
for entry in testdata.des_cbc:
|
||||
key, iv, plain, cipher=entry
|
||||
key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher)
|
||||
obj1=des.new(key, des.MODE_CBC, iv)
|
||||
obj2=des.new(key, des.MODE_CBC, iv)
|
||||
ciphertext=obj1.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('DES CBC mode failed on entry '+`entry`)
|
||||
|
||||
if 'des3' in args:
|
||||
ciph=exerciseBlockCipher('DES3', verbose) # Triple DES
|
||||
if (ciph!=None):
|
||||
if verbose: print ' Verifying against test suite...'
|
||||
for entry in testdata.des3:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=ciph.new(key, ciph.MODE_ECB)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('DES3 failed on entry '+`entry`)
|
||||
for i in ciphertext:
|
||||
if verbose: print hex(ord(i)),
|
||||
if verbose: print
|
||||
for entry in testdata.des3_cbc:
|
||||
key, iv, plain, cipher=entry
|
||||
key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher)
|
||||
obj1=ciph.new(key, ciph.MODE_CBC, iv)
|
||||
obj2=ciph.new(key, ciph.MODE_CBC, iv)
|
||||
ciphertext=obj1.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('DES3 CBC mode failed on entry '+`entry`)
|
||||
|
||||
if 'idea' in args:
|
||||
ciph=exerciseBlockCipher('IDEA', verbose) # IDEA block cipher
|
||||
if (ciph!=None):
|
||||
if verbose: print ' Verifying against test suite...'
|
||||
for entry in testdata.idea:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=ciph.new(key, ciph.MODE_ECB)
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('IDEA failed on entry '+`entry`)
|
||||
|
||||
if 'rc5' in args:
|
||||
# Ronald Rivest's RC5 algorithm
|
||||
ciph=exerciseBlockCipher('RC5', verbose)
|
||||
if (ciph!=None):
|
||||
if verbose: print ' Verifying against test suite...'
|
||||
for entry in testdata.rc5:
|
||||
key,plain,cipher=entry
|
||||
key=binascii.a2b_hex(key)
|
||||
plain=binascii.a2b_hex(plain)
|
||||
cipher=binascii.a2b_hex(cipher)
|
||||
obj=ciph.new(key[4:], ciph.MODE_ECB,
|
||||
version =ord(key[0]),
|
||||
word_size=ord(key[1]),
|
||||
rounds =ord(key[2]) )
|
||||
ciphertext=obj.encrypt(plain)
|
||||
if (ciphertext!=cipher):
|
||||
die('RC5 failed on entry '+`entry`)
|
||||
for i in ciphertext:
|
||||
if verbose: print hex(ord(i)),
|
||||
if verbose: print
|
||||
|
||||
|
||||
|
||||
25
python/gdata/Crypto/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
||||
|
||||
"""Python Cryptography Toolkit
|
||||
|
||||
A collection of cryptographic modules implementing various algorithms
|
||||
and protocols.
|
||||
|
||||
Subpackages:
|
||||
Crypto.Cipher Secret-key encryption algorithms (AES, DES, ARC4)
|
||||
Crypto.Hash Hashing algorithms (MD5, SHA, HMAC)
|
||||
Crypto.Protocol Cryptographic protocols (Chaffing, all-or-nothing
|
||||
transform). This package does not contain any
|
||||
network protocols.
|
||||
Crypto.PublicKey Public-key encryption and signature algorithms
|
||||
(RSA, DSA)
|
||||
Crypto.Util Various useful modules and functions (long-to-string
|
||||
conversion, random number generation, number
|
||||
theoretic functions)
|
||||
"""
|
||||
|
||||
__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util']
|
||||
|
||||
__version__ = '2.0.1'
|
||||
__revision__ = "$Id: __init__.py,v 1.12 2005/06/14 01:20:22 akuchling Exp $"
|
||||
|
||||
|
||||
38
python/gdata/Crypto/test.py
Normal file
@@ -0,0 +1,38 @@
|
||||
#
|
||||
# Test script for the Python Cryptography Toolkit.
|
||||
#
|
||||
|
||||
__revision__ = "$Id: test.py,v 1.7 2002/07/11 14:31:19 akuchling Exp $"
|
||||
|
||||
import os, sys
|
||||
|
||||
|
||||
# Add the build directory to the front of sys.path
|
||||
from distutils.util import get_platform
|
||||
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
|
||||
s = os.path.join(os.getcwd(), s)
|
||||
sys.path.insert(0, s)
|
||||
s = os.path.join(os.getcwd(), 'test')
|
||||
sys.path.insert(0, s)
|
||||
|
||||
from Crypto.Util import test
|
||||
|
||||
args = sys.argv[1:]
|
||||
quiet = "--quiet" in args
|
||||
if quiet: args.remove('--quiet')
|
||||
|
||||
if not quiet:
|
||||
print '\nStream Ciphers:'
|
||||
print '==============='
|
||||
|
||||
if args: test.TestStreamModules(args, verbose= not quiet)
|
||||
else: test.TestStreamModules(verbose= not quiet)
|
||||
|
||||
if not quiet:
|
||||
print '\nBlock Ciphers:'
|
||||
print '=============='
|
||||
|
||||
if args: test.TestBlockModules(args, verbose= not quiet)
|
||||
else: test.TestBlockModules(verbose= not quiet)
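# Example invocation (assumed to be run from the source tree after building):
#
#     python test.py --quiet aes des
#
# With no module arguments, every stream and block cipher is exercised.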
|
||||
|
||||
|
||||
835
python/gdata/__init__.py
Normal file
@@ -0,0 +1,835 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2006 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Contains classes representing Google Data elements.
|
||||
|
||||
Extends Atom classes to add Google Data specific elements.
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeffrey Scudder)'
|
||||
|
||||
import os
|
||||
import atom
|
||||
try:
|
||||
from xml.etree import cElementTree as ElementTree
|
||||
except ImportError:
|
||||
try:
|
||||
import cElementTree as ElementTree
|
||||
except ImportError:
|
||||
try:
|
||||
from xml.etree import ElementTree
|
||||
except ImportError:
|
||||
from elementtree import ElementTree
|
||||
|
||||
|
||||
# XML namespaces which are often used in GData entities.
|
||||
GDATA_NAMESPACE = 'http://schemas.google.com/g/2005'
|
||||
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
|
||||
OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
|
||||
OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
|
||||
BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'
|
||||
GACL_NAMESPACE = 'http://schemas.google.com/acl/2007'
|
||||
GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
|
||||
|
||||
|
||||
# Labels used in batch request entries to specify the desired CRUD operation.
|
||||
BATCH_INSERT = 'insert'
|
||||
BATCH_UPDATE = 'update'
|
||||
BATCH_DELETE = 'delete'
|
||||
BATCH_QUERY = 'query'
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class MissingRequiredParameters(Error):
|
||||
pass
|
||||
|
||||
|
||||
class MediaSource(object):
|
||||
"""GData Entries can refer to media sources, so this class provides a
|
||||
place to store references to these objects along with some metadata.
|
||||
"""
|
||||
|
||||
def __init__(self, file_handle=None, content_type=None, content_length=None,
|
||||
file_path=None, file_name=None):
|
||||
"""Creates an object of type MediaSource.
|
||||
|
||||
Args:
|
||||
file_handle: A file handle pointing to the file to be encapsulated in the
|
||||
MediaSource
|
||||
content_type: string The MIME type of the file. Required if a file_handle
|
||||
is given.
|
||||
content_length: int The size of the file. Required if a file_handle is
|
||||
given.
|
||||
file_path: string (optional) A full path name to the file. Used in
|
||||
place of a file_handle.
|
||||
file_name: string The name of the file without any path information.
|
||||
Required if a file_handle is given.
|
||||
"""
|
||||
self.file_handle = file_handle
|
||||
self.content_type = content_type
|
||||
self.content_length = content_length
|
||||
self.file_name = file_name
|
||||
|
||||
if (file_handle is None and content_type is not None and
|
||||
file_path is not None):
|
||||
self.setFile(file_path, content_type)
|
||||
|
||||
def setFile(self, file_name, content_type):
|
||||
"""A helper function which can create a file handle from a given filename
|
||||
and set the content type and length all at once.
|
||||
|
||||
Args:
|
||||
file_name: string The path and file name to the file containing the media
|
||||
content_type: string A MIME type representing the type of the media
|
||||
"""
|
||||
|
||||
self.file_handle = open(file_name, 'rb')
|
||||
self.content_type = content_type
|
||||
self.content_length = os.path.getsize(file_name)
|
||||
self.file_name = os.path.basename(file_name)
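# Illustrative sketch (not part of the original module): constructing a
# MediaSource for an upload. The file name and MIME type are hypothetical.
def _example_media_source():
    media = MediaSource(file_path='photo.jpg', content_type='image/jpeg')
    # setFile() has populated the handle, length and short name for us.
    return media.file_name, media.content_length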
|
||||
|
||||
|
||||
class LinkFinder(atom.LinkFinder):
|
||||
"""An "interface" providing methods to find link elements
|
||||
|
||||
GData Entry elements often contain multiple links which differ in the rel
|
||||
attribute or content type. Often, developers are interested in a specific
|
||||
type of link so this class provides methods to find specific classes of
|
||||
links.
|
||||
|
||||
This class is used as a mixin in GData entries.
|
||||
"""
|
||||
|
||||
def GetSelfLink(self):
|
||||
"""Find the first link with rel set to 'self'
|
||||
|
||||
Returns:
|
||||
An atom.Link or None if none of the links had rel equal to 'self'
|
||||
"""
|
||||
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'self':
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetEditLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'edit':
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetEditMediaLink(self):
|
||||
"""The Picasa API mistakenly returns media-edit rather than edit-media, but
|
||||
this may change soon.
|
||||
"""
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'edit-media':
|
||||
return a_link
|
||||
if a_link.rel == 'media-edit':
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetHtmlLink(self):
|
||||
"""Find the first link with rel of alternate and type of text/html
|
||||
|
||||
Returns:
|
||||
An atom.Link or None if no links matched
|
||||
"""
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'alternate' and a_link.type == 'text/html':
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetPostLink(self):
|
||||
"""Get a link containing the POST target URL.
|
||||
|
||||
The POST target URL is used to insert new entries.
|
||||
|
||||
Returns:
|
||||
A link object with a rel matching the POST type.
|
||||
"""
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'http://schemas.google.com/g/2005#post':
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetAclLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'http://schemas.google.com/acl/2007#accessControlList':
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetFeedLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'http://schemas.google.com/g/2005#feed':
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetNextLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'next':
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetPrevLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == 'previous':
|
||||
return a_link
|
||||
return None
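# Illustrative sketch (not part of the original module): the LinkFinder mix-in
# is normally used on a parsed entry or feed object.
def _example_edit_url(entry):
    """Returns the edit URL of an entry, or None if it has no edit link."""
    edit_link = entry.GetEditLink()
    if edit_link is not None:
        return edit_link.href
    return None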
|
||||
|
||||
|
||||
class TotalResults(atom.AtomBase):
|
||||
"""opensearch:TotalResults for a GData feed"""
|
||||
|
||||
_tag = 'totalResults'
|
||||
_namespace = OPENSEARCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
def __init__(self, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def TotalResultsFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(TotalResults, xml_string)
|
||||
|
||||
|
||||
class StartIndex(atom.AtomBase):
|
||||
"""The opensearch:startIndex element in GData feed"""
|
||||
|
||||
_tag = 'startIndex'
|
||||
_namespace = OPENSEARCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
def __init__(self, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def StartIndexFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(StartIndex, xml_string)
|
||||
|
||||
|
||||
class ItemsPerPage(atom.AtomBase):
|
||||
"""The opensearch:itemsPerPage element in GData feed"""
|
||||
|
||||
_tag = 'itemsPerPage'
|
||||
_namespace = OPENSEARCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
def __init__(self, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def ItemsPerPageFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(ItemsPerPage, xml_string)
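# Illustrative sketch (not part of the original module): parsing a standalone
# opensearch element with the *FromString helpers defined above.
def _example_items_per_page():
    xml = ('<itemsPerPage xmlns="http://a9.com/-/spec/opensearchrss/1.0/">'
           '25</itemsPerPage>')
    return ItemsPerPageFromString(xml).text  # '25'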
|
||||
|
||||
|
||||
class ExtendedProperty(atom.AtomBase):
|
||||
"""The Google Data extendedProperty element.
|
||||
|
||||
Used to store arbitrary key-value information specific to your
|
||||
application. The value can either be a text string stored as an XML
|
||||
attribute (.value), or an XML node (XmlBlob) as a child element.
|
||||
|
||||
This element is used in the Google Calendar data API and the Google
|
||||
Contacts data API.
|
||||
"""
|
||||
|
||||
_tag = 'extendedProperty'
|
||||
_namespace = GDATA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['name'] = 'name'
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, name=None, value=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
def GetXmlBlobExtensionElement(self):
|
||||
"""Returns the XML blob as an atom.ExtensionElement.
|
||||
|
||||
Returns:
|
||||
An atom.ExtensionElement representing the blob's XML, or None if no
|
||||
blob was set.
|
||||
"""
|
||||
if len(self.extension_elements) < 1:
|
||||
return None
|
||||
else:
|
||||
return self.extension_elements[0]
|
||||
|
||||
def GetXmlBlobString(self):
|
||||
"""Returns the XML blob as a string.
|
||||
|
||||
Returns:
|
||||
A string containing the blob's XML, or None if no blob was set.
|
||||
"""
|
||||
blob = self.GetXmlBlobExtensionElement()
|
||||
if blob:
|
||||
return blob.ToString()
|
||||
return None
|
||||
|
||||
def SetXmlBlob(self, blob):
|
||||
"""Sets the contents of the extendedProperty to XML as a child node.
|
||||
|
||||
Since the extendedProperty is only allowed one child element as an XML
|
||||
blob, setting the XML blob will erase any preexisting extension elements
|
||||
in this object.
|
||||
|
||||
Args:
|
||||
blob: str, ElementTree Element or atom.ExtensionElement representing
|
||||
the XML blob stored in the extendedProperty.
|
||||
"""
|
||||
# Erase any existing extension_elements, clears the child nodes from the
|
||||
# extendedProperty.
|
||||
self.extension_elements = []
|
||||
if isinstance(blob, atom.ExtensionElement):
|
||||
self.extension_elements.append(blob)
|
||||
elif ElementTree.iselement(blob):
|
||||
self.extension_elements.append(atom._ExtensionElementFromElementTree(
|
||||
blob))
|
||||
else:
|
||||
self.extension_elements.append(atom.ExtensionElementFromString(blob))
|
||||
|
||||
|
||||
def ExtendedPropertyFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(ExtendedProperty, xml_string)
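# Illustrative sketch (not part of the original module): an extendedProperty
# can carry either a simple value attribute or a single XML blob child. The
# example namespace below is hypothetical.
def _example_extended_property():
    simple = ExtendedProperty(name='color', value='blue')
    blob = ExtendedProperty(name='details')
    blob.SetXmlBlob('<detail xmlns="http://example.com/ns">42</detail>')
    return simple.value, blob.GetXmlBlobString()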
|
||||
|
||||
|
||||
class GDataEntry(atom.Entry, LinkFinder):
|
||||
"""Extends Atom Entry to provide data processing"""
|
||||
|
||||
_tag = atom.Entry._tag
|
||||
_namespace = atom.Entry._namespace
|
||||
_children = atom.Entry._children.copy()
|
||||
_attributes = atom.Entry._attributes.copy()
|
||||
|
||||
def __GetId(self):
|
||||
return self.__id
|
||||
|
||||
# This method was created to strip the unwanted whitespace from the id's
|
||||
# text node.
|
||||
def __SetId(self, id):
|
||||
self.__id = id
|
||||
if id is not None and id.text is not None:
|
||||
self.__id.text = id.text.strip()
|
||||
|
||||
id = property(__GetId, __SetId)
|
||||
|
||||
def IsMedia(self):
|
||||
"""Determines whether or not an entry is a GData Media entry.
|
||||
"""
|
||||
if (self.GetEditMediaLink()):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def GetMediaURL(self):
|
||||
"""Returns the URL to the media content, if the entry is a media entry.
|
||||
Otherwise returns None.
|
||||
"""
|
||||
if not self.IsMedia():
|
||||
return None
|
||||
else:
|
||||
return self.content.src
|
||||
|
||||
|
||||
def GDataEntryFromString(xml_string):
|
||||
"""Creates a new GDataEntry instance given a string of XML."""
|
||||
return atom.CreateClassFromXMLString(GDataEntry, xml_string)
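# Illustrative sketch (not part of the original module): round-tripping a
# minimal Atom entry and reading its whitespace-stripped id.
def _example_entry_from_string():
    xml = ('<entry xmlns="http://www.w3.org/2005/Atom">'
           '<id> http://example.com/feeds/1 </id>'
           '<title type="text">Hello</title>'
           '</entry>')
    entry = GDataEntryFromString(xml)
    # The id setter strips the surrounding whitespace from the text node.
    return entry.id.text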
|
||||
|
||||
|
||||
class GDataFeed(atom.Feed, LinkFinder):
|
||||
"""A Feed from a GData service"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = atom.Feed._children.copy()
|
||||
_attributes = atom.Feed._attributes.copy()
|
||||
_children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results',
|
||||
TotalResults)
|
||||
_children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index',
|
||||
StartIndex)
|
||||
_children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page',
|
||||
ItemsPerPage)
|
||||
# Add a conversion rule for atom:entry to make it into a GData
|
||||
# Entry.
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry])
|
||||
|
||||
def __GetId(self):
|
||||
return self.__id
|
||||
|
||||
def __SetId(self, id):
|
||||
self.__id = id
|
||||
if id is not None and id.text is not None:
|
||||
self.__id.text = id.text.strip()
|
||||
|
||||
id = property(__GetId, __SetId)
|
||||
|
||||
def __GetGenerator(self):
|
||||
return self.__generator
|
||||
|
||||
def __SetGenerator(self, generator):
|
||||
self.__generator = generator
|
||||
if generator is not None:
|
||||
self.__generator.text = generator.text.strip()
|
||||
|
||||
generator = property(__GetGenerator, __SetGenerator)
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None, entry=None,
|
||||
total_results=None, start_index=None, items_per_page=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
"""Constructor for Source
|
||||
|
||||
Args:
|
||||
author: list (optional) A list of Author instances which belong to this
|
||||
class.
|
||||
category: list (optional) A list of Category instances
|
||||
contributor: list (optional) A list on Contributor instances
|
||||
generator: Generator (optional)
|
||||
icon: Icon (optional)
|
||||
id: Id (optional) The entry's Id element
|
||||
link: list (optional) A list of Link instances
|
||||
logo: Logo (optional)
|
||||
rights: Rights (optional) The entry's Rights element
|
||||
subtitle: Subtitle (optional) The entry's subtitle element
|
||||
title: Title (optional) the entry's title element
|
||||
updated: Updated (optional) the entry's updated element
|
||||
entry: list (optional) A list of the Entry instances contained in the
|
||||
feed.
|
||||
text: String (optional) The text contents of the element. This is the
|
||||
contents of the Entry's XML text node.
|
||||
(Example: <foo>This is the text</foo>)
|
||||
extension_elements: list (optional) A list of ExtensionElement instances
|
||||
which are children of this element.
|
||||
extension_attributes: dict (optional) A dictionary of strings which are
|
||||
the values for additional XML attributes of this element.
|
||||
"""
|
||||
|
||||
self.author = author or []
|
||||
self.category = category or []
|
||||
self.contributor = contributor or []
|
||||
self.generator = generator
|
||||
self.icon = icon
|
||||
self.id = atom_id
|
||||
self.link = link or []
|
||||
self.logo = logo
|
||||
self.rights = rights
|
||||
self.subtitle = subtitle
|
||||
self.title = title
|
||||
self.updated = updated
|
||||
self.entry = entry or []
|
||||
self.total_results = total_results
|
||||
self.start_index = start_index
|
||||
self.items_per_page = items_per_page
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def GDataFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GDataFeed, xml_string)
|
||||
|
||||
|
||||
class BatchId(atom.AtomBase):
|
||||
_tag = 'id'
|
||||
_namespace = BATCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
|
||||
def BatchIdFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BatchId, xml_string)
|
||||
|
||||
|
||||
class BatchOperation(atom.AtomBase):
|
||||
_tag = 'operation'
|
||||
_namespace = BATCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['type'] = 'type'
|
||||
|
||||
def __init__(self, op_type=None, extension_elements=None,
|
||||
extension_attributes=None,
|
||||
text=None):
|
||||
self.type = op_type
|
||||
atom.AtomBase.__init__(self,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def BatchOperationFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BatchOperation, xml_string)
|
||||
|
||||
|
||||
class BatchStatus(atom.AtomBase):
|
||||
"""The batch:status element present in a batch response entry.
|
||||
|
||||
A status element contains the code (HTTP response code) and
|
||||
reason as elements. In a single request these fields would
|
||||
be part of the HTTP response, but in a batch request each
|
||||
Entry operation has a corresponding Entry in the response
|
||||
feed which includes status information.
|
||||
|
||||
See http://code.google.com/apis/gdata/batch.html#Handling_Errors
|
||||
"""
|
||||
|
||||
_tag = 'status'
|
||||
_namespace = BATCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['code'] = 'code'
|
||||
_attributes['reason'] = 'reason'
|
||||
_attributes['content-type'] = 'content_type'
|
||||
|
||||
def __init__(self, code=None, reason=None, content_type=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
self.code = code
|
||||
self.reason = reason
|
||||
self.content_type = content_type
|
||||
atom.AtomBase.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def BatchStatusFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BatchStatus, xml_string)
|
||||
|
||||
|
||||
class BatchEntry(GDataEntry):
|
||||
"""An atom:entry for use in batch requests.
|
||||
|
||||
The BatchEntry contains additional members to specify the operation to be
|
||||
performed on this entry and a batch ID so that the server can reference
|
||||
individual operations in the response feed. For more information, see:
|
||||
http://code.google.com/apis/gdata/batch.html
|
||||
"""
|
||||
|
||||
_tag = GDataEntry._tag
|
||||
_namespace = GDataEntry._namespace
|
||||
_children = GDataEntry._children.copy()
|
||||
_children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation)
|
||||
_children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId)
|
||||
_children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus)
|
||||
_attributes = GDataEntry._attributes.copy()
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
contributor=None, atom_id=None, link=None, published=None, rights=None,
|
||||
source=None, summary=None, control=None, title=None, updated=None,
|
||||
batch_operation=None, batch_id=None, batch_status=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
self.batch_operation = batch_operation
|
||||
self.batch_id = batch_id
|
||||
self.batch_status = batch_status
|
||||
GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content, contributor=contributor, atom_id=atom_id, link=link,
|
||||
published=published, rights=rights, source=source, summary=summary,
|
||||
control=control, title=title, updated=updated,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
|
||||
def BatchEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BatchEntry, xml_string)
|
||||
|
||||
|
||||
class BatchInterrupted(atom.AtomBase):
|
||||
"""The batch:interrupted element sent if batch request was interrupted.
|
||||
|
||||
Only appears in a feed if some of the batch entries could not be processed.
|
||||
See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
|
||||
"""
|
||||
|
||||
_tag = 'interrupted'
|
||||
_namespace = BATCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['reason'] = 'reason'
|
||||
_attributes['success'] = 'success'
|
||||
_attributes['failures'] = 'failures'
|
||||
_attributes['parsed'] = 'parsed'
|
||||
|
||||
def __init__(self, reason=None, success=None, failures=None, parsed=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
self.reason = reason
|
||||
self.success = success
|
||||
self.failures = failures
|
||||
self.parsed = parsed
|
||||
atom.AtomBase.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def BatchInterruptedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BatchInterrupted, xml_string)
|
||||
|
||||
|
||||
class BatchFeed(GDataFeed):
|
||||
"""A feed containing a list of batch request entries."""
|
||||
|
||||
_tag = GDataFeed._tag
|
||||
_namespace = GDataFeed._namespace
|
||||
_children = GDataFeed._children.copy()
|
||||
_attributes = GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry])
|
||||
_children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted', BatchInterrupted)
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None, entry=None,
|
||||
total_results=None, start_index=None, items_per_page=None,
|
||||
interrupted=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
self.interrupted = interrupted
|
||||
GDataFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results, start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
def AddBatchEntry(self, entry=None, id_url_string=None,
|
||||
batch_id_string=None, operation_string=None):
|
||||
"""Logic for populating members of a BatchEntry and adding to the feed.
|
||||
|
||||
|
||||
If the entry is not a BatchEntry, it is converted to a BatchEntry so
|
||||
that the batch specific members will be present.
|
||||
|
||||
The id_url_string can be used in place of an entry if the batch operation
|
||||
applies to a URL. For example query and delete operations require just
|
||||
the URL of an entry; no body is sent in the HTTP request. If an
|
||||
id_url_string is sent instead of an entry, a BatchEntry is created and
|
||||
added to the feed.
|
||||
|
||||
This method also assigns the desired batch id to the entry so that it
|
||||
can be referenced in the server's response. If the batch_id_string is
|
||||
None, this method will assign a batch_id to be the index at which this
|
||||
entry will be in the feed's entry list.
|
||||
|
||||
Args:
|
||||
entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
|
||||
entry which will be sent to the server as part of the batch request.
|
||||
The item must have a valid atom id so that the server knows which
|
||||
entry this request references.
|
||||
id_url_string: str (optional) The URL of the entry to be acted on. You
|
||||
can find this URL in the text member of the atom id for an entry.
|
||||
If an entry is not sent, this id will be used to construct a new
|
||||
BatchEntry which will be added to the request feed.
|
||||
batch_id_string: str (optional) The batch ID to be used to reference
|
||||
this batch operation in the results feed. If this parameter is None,
|
||||
the current length of the feed's entry array will be used as a
|
||||
count. Note that batch_ids should either always be specified or
|
||||
never; mixing could potentially result in duplicate batch ids.
|
||||
operation_string: str (optional) The desired batch operation which will
|
||||
set the batch_operation.type member of the entry. Options are
|
||||
'insert', 'update', 'delete', and 'query'
|
||||
|
||||
Raises:
|
||||
MissingRequiredParameters: Raised if neither an id_url_string nor an
|
||||
entry is provided in the request.
|
||||
|
||||
Returns:
|
||||
The added entry.
|
||||
"""
|
||||
if entry is None and id_url_string is None:
|
||||
raise MissingRequiredParameters('supply either an entry or URL string')
|
||||
if entry is None and id_url_string is not None:
|
||||
entry = BatchEntry(atom_id=atom.Id(text=id_url_string))
|
||||
# TODO: handle cases in which the entry lacks batch_... members.
|
||||
#if not isinstance(entry, BatchEntry):
|
||||
# Convert the entry to a batch entry.
|
||||
if batch_id_string is not None:
|
||||
entry.batch_id = BatchId(text=batch_id_string)
|
||||
elif entry.batch_id is None or entry.batch_id.text is None:
|
||||
entry.batch_id = BatchId(text=str(len(self.entry)))
|
||||
if operation_string is not None:
|
||||
entry.batch_operation = BatchOperation(op_type=operation_string)
|
||||
self.entry.append(entry)
|
||||
return entry
|
||||
|
||||
def AddInsert(self, entry, batch_id_string=None):
|
||||
"""Add an insert request to the operations in this batch request feed.
|
||||
|
||||
If the entry doesn't yet have an operation or a batch id, these will
|
||||
be set to the insert operation and a batch_id specified as a parameter.
|
||||
|
||||
Args:
|
||||
entry: BatchEntry The entry which will be sent in the batch feed as an
|
||||
insert request.
|
||||
batch_id_string: str (optional) The batch ID to be used to reference
|
||||
this batch operation in the results feed. If this parameter is None,
|
||||
the current length of the feed's entry array will be used as a
|
||||
count. Note that batch_ids should either always be specified or
|
||||
never; mixing could potentially result in duplicate batch ids.
|
||||
"""
|
||||
entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
|
||||
operation_string=BATCH_INSERT)
|
||||
|
||||
def AddUpdate(self, entry, batch_id_string=None):
|
||||
"""Add an update request to the list of batch operations in this feed.
|
||||
|
||||
Sets the operation type of the entry to update if it is not already set
|
||||
and assigns the desired batch id to the entry so that it can be
|
||||
referenced in the server's response.
|
||||
|
||||
Args:
|
||||
entry: BatchEntry The entry which will be sent to the server as an
|
||||
update (HTTP PUT) request. The item must have a valid atom id
|
||||
so that the server knows which entry to replace.
|
||||
batch_id_string: str (optional) The batch ID to be used to reference
|
||||
this batch operation in the results feed. If this parameter is None,
|
||||
the current length of the feed's entry array will be used as a
|
||||
count. See also comments for AddInsert.
|
||||
"""
|
||||
entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
|
||||
operation_string=BATCH_UPDATE)
|
||||
|
||||
def AddDelete(self, url_string=None, entry=None, batch_id_string=None):
|
||||
"""Adds a delete request to the batch request feed.
|
||||
|
||||
This method takes either the url_string which is the atom id of the item
|
||||
to be deleted, or the entry itself. The atom id of the entry must be
|
||||
present so that the server knows which entry should be deleted.
|
||||
|
||||
Args:
|
||||
url_string: str (optional) The URL of the entry to be deleted. You can
|
||||
find this URL in the text member of the atom id for an entry.
|
||||
entry: BatchEntry (optional) The entry to be deleted.
|
||||
batch_id_string: str (optional)
|
||||
|
||||
Raises:
|
||||
MissingRequiredParameters: Raised if neither a url_string nor an entry
|
||||
is provided in the request.
|
||||
"""
|
||||
entry = self.AddBatchEntry(entry=entry, id_url_string=url_string,
|
||||
batch_id_string=batch_id_string,
|
||||
operation_string=BATCH_DELETE)
|
||||
|
||||
def AddQuery(self, url_string=None, entry=None, batch_id_string=None):
|
||||
"""Adds a query request to the batch request feed.
|
||||
|
||||
This method takes the url_string, which is the query URL
|
||||
whose results will be added to the result feed. The query URL will
|
||||
be encapsulated in a BatchEntry, and you may pass in the BatchEntry
|
||||
with a query URL instead of sending a url_string.
|
||||
|
||||
Args:
|
||||
url_string: str (optional)
|
||||
entry: BatchEntry (optional)
|
||||
batch_id_string: str (optional)
|
||||
|
||||
Raises:
|
||||
MissingRequiredParameters
|
||||
"""
|
||||
entry = self.AddBatchEntry(entry=entry, id_url_string=url_string,
|
||||
batch_id_string=batch_id_string,
|
||||
operation_string=BATCH_QUERY)
|
||||
|
||||
def GetBatchLink(self):
|
||||
for link in self.link:
|
||||
if link.rel == 'http://schemas.google.com/g/2005#batch':
|
||||
return link
|
||||
return None
|
||||
|
||||
|
||||
def BatchFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BatchFeed, xml_string)
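# Illustrative sketch (not part of the original module): assembling a batch
# request feed with one insert and one delete. The entry and URL are
# hypothetical; the feed would normally be POSTed to a service's batch URL.
def _example_batch_feed(new_entry):
    feed = BatchFeed()
    feed.AddInsert(new_entry, batch_id_string='insert-1')
    feed.AddDelete(url_string='http://example.com/feeds/item1',
                   batch_id_string='delete-1')
    return feed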
|
||||
|
||||
|
||||
class EntryLink(atom.AtomBase):
|
||||
"""The gd:entryLink element"""
|
||||
|
||||
_tag = 'entryLink'
|
||||
_namespace = GDATA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
# The entry used to be an atom.Entry, now it is a GDataEntry.
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry)
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['readOnly'] = 'read_only'
|
||||
_attributes['href'] = 'href'
|
||||
|
||||
def __init__(self, href=None, read_only=None, rel=None,
|
||||
entry=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.href = href
|
||||
self.read_only = read_only
|
||||
self.rel = rel
|
||||
self.entry = entry
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def EntryLinkFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(EntryLink, xml_string)
|
||||
|
||||
|
||||
class FeedLink(atom.AtomBase):
|
||||
"""The gd:feedLink element"""
|
||||
|
||||
_tag = 'feedLink'
|
||||
_namespace = GDATA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed)
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['readOnly'] = 'read_only'
|
||||
_attributes['countHint'] = 'count_hint'
|
||||
_attributes['href'] = 'href'
|
||||
|
||||
def __init__(self, count_hint=None, href=None, read_only=None, rel=None,
|
||||
feed=None, extension_elements=None, extension_attributes=None,
|
||||
text=None):
|
||||
self.count_hint = count_hint
|
||||
self.href = href
|
||||
self.read_only = read_only
|
||||
self.rel = rel
|
||||
self.feed = feed
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def FeedLinkFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(FeedLink, xml_string)
|
||||
15
python/gdata/acl/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
55
python/gdata/acl/data.py
Normal file
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains the data classes of the Google Access Control List (ACL) Extension"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
import atom.data
|
||||
import gdata.data
|
||||
import gdata.opensearch.data
|
||||
|
||||
|
||||
GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
|
||||
|
||||
|
||||
class AclRole(atom.core.XmlElement):
|
||||
"""Describes the role of an entry in an access control list."""
|
||||
_qname = GACL_TEMPLATE % 'role'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class AclScope(atom.core.XmlElement):
|
||||
"""Describes the scope of an entry in an access control list."""
|
||||
_qname = GACL_TEMPLATE % 'scope'
|
||||
type = 'type'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class AclEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry in a feed of an access control list (ACL)."""
|
||||
scope = AclScope
|
||||
role = AclRole
|
||||
|
||||
|
||||
class AclFeed(gdata.data.GDFeed):
|
||||
"""Describes a feed of an access control list (ACL)."""
|
||||
entry = [AclEntry]
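# Illustrative sketch (not part of the original module): granting read access
# to a hypothetical user via an ACL entry.
def _example_acl_entry():
    scope = AclScope()
    scope.type = 'user'
    scope.value = 'user@example.com'
    role = AclRole()
    role.value = 'reader'
    entry = AclEntry()
    entry.scope = scope
    entry.role = role
    return entry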
|
||||
|
||||
|
||||
20
python/gdata/alt/__init__.py
Normal file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""This package's modules adapt the gdata library to run in other environments
|
||||
|
||||
The first example is the appengine module which contains functions and
|
||||
classes which modify a GDataService object to run on Google App Engine.
|
||||
"""
|
||||
101
python/gdata/alt/app_engine.py
Normal file
@@ -0,0 +1,101 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Provides functions to persist serialized auth tokens in the datastore.
|
||||
|
||||
The get_token and set_token functions should be used in conjunction with
|
||||
gdata.gauth's token_from_blob and token_to_blob to allow auth token objects
|
||||
to be reused across requests. It is up to your own code to ensure that the
|
||||
token keys are unique.
|
||||
"""
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
from google.appengine.ext import db
|
||||
from google.appengine.api import memcache
|
||||
|
||||
|
||||
class Token(db.Model):
|
||||
"""Datastore Model which stores a serialized auth token."""
|
||||
t = db.BlobProperty()
|
||||
|
||||
|
||||
def get_token(unique_key):
|
||||
"""Searches for a stored token with the desired key.
|
||||
|
||||
Checks memcache and then the datastore if required.
|
||||
|
||||
Args:
|
||||
unique_key: str which uniquely identifies the desired auth token.
|
||||
|
||||
Returns:
|
||||
A string encoding the auth token data. Use gdata.gauth.token_from_blob to
|
||||
convert back into a usable token object. None if the token was not found
|
||||
in memcache or the datastore.
|
||||
"""
|
||||
token_string = memcache.get(unique_key)
|
||||
if token_string is None:
|
||||
# The token wasn't in memcache, so look in the datastore.
|
||||
token = Token.get_by_key_name(unique_key)
|
||||
if token is None:
|
||||
return None
|
||||
return token.t
|
||||
return token_string
|
||||
|
||||
|
||||
def set_token(unique_key, token_str):
|
||||
"""Saves the serialized auth token in the datastore.
|
||||
|
||||
The token is also stored in memcache to speed up retrieval on a cache hit.
|
||||
|
||||
Args:
|
||||
unique_key: The unique name for this token as a string. It is up to your
|
||||
code to ensure that this token value is unique in your application.
|
||||
Previous values will be silently overwritten.
|
||||
token_str: A serialized auth token as a string. I expect that this string
|
||||
will be generated by gdata.gauth.token_to_blob.
|
||||
|
||||
Returns:
|
||||
True if the token was stored successfully, False if the token could not be
|
||||
safely cached (if an old value could not be cleared). If the token was
|
||||
set in memcache, but not in the datastore, this function will return None.
|
||||
However, in that situation an exception will likely be raised.
|
||||
|
||||
Raises:
|
||||
Datastore exceptions may be raised from the App Engine SDK in the event of
|
||||
failure.
|
||||
"""
|
||||
# First try to save in memcache.
|
||||
result = memcache.set(unique_key, token_str)
|
||||
# If memcache fails to save the value, clear the cached value.
|
||||
if not result:
|
||||
result = memcache.delete(unique_key)
|
||||
# If we could not clear the cached value for this token, refuse to save.
|
||||
if result == 0:
|
||||
return False
|
||||
# Save to the datastore.
|
||||
if Token(key_name=unique_key, t=token_str).put():
|
||||
return True
|
||||
return None
|
||||
|
||||
|
||||
def delete_token(unique_key):
|
||||
# Clear from memcache.
|
||||
memcache.delete(unique_key)
|
||||
# Clear from the datastore.
|
||||
Token(key_name=unique_key).delete()
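# Illustrative sketch (not part of the original module): persisting and
# restoring a token with the helpers above; gdata.gauth provides the blob
# converters mentioned in the module docstring.
def _example_roundtrip_token(unique_key, token):
    import gdata.gauth
    set_token(unique_key, gdata.gauth.token_to_blob(token))
    blob = get_token(unique_key)
    if blob is not None:
        return gdata.gauth.token_from_blob(blob)
    return None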
|
||||
321
python/gdata/alt/appengine.py
Normal file
@@ -0,0 +1,321 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Provides HTTP functions for gdata.service to use on Google App Engine
|
||||
|
||||
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
|
||||
urlfetch API. Set the http_client member of a GDataService object to an
|
||||
instance of an AppEngineHttpClient to allow the gdata library to run on
|
||||
Google App Engine.
|
||||
|
||||
run_on_appengine: Function which will modify an existing GDataService object
|
||||
to allow it to run on App Engine. It works by creating a new instance of
|
||||
the AppEngineHttpClient and replacing the GDataService object's
|
||||
http_client.
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'api.jscudder (Jeff Scudder)'
|
||||
|
||||
|
||||
import StringIO
|
||||
import pickle
|
||||
import atom.http_interface
|
||||
import atom.token_store
|
||||
from google.appengine.api import urlfetch
|
||||
from google.appengine.ext import db
|
||||
from google.appengine.api import users
|
||||
from google.appengine.api import memcache
|
||||
|
||||
|
||||
def run_on_appengine(gdata_service, store_tokens=True,
|
||||
single_user_mode=False, deadline=None):
|
||||
"""Modifies a GDataService object to allow it to run on App Engine.
|
||||
|
||||
Args:
|
||||
gdata_service: An instance of AtomService, GDataService, or any
|
||||
of their subclasses which has an http_client member and a
|
||||
token_store member.
|
||||
store_tokens: Boolean, defaults to True. If True, the gdata_service
|
||||
will attempt to add each token to its token_store when
|
||||
SetClientLoginToken or SetAuthSubToken is called. If False
|
||||
the tokens will not automatically be added to the
|
||||
token_store.
|
||||
single_user_mode: Boolean, defaults to False. If True, the current_token
|
||||
member of gdata_service will be set when
|
||||
SetClientLoginToken or SetAuthSubToken is called. If set
|
||||
to True, the current_token is set in the gdata_service
|
||||
and anyone who accesses the object will use the same
|
||||
token.
|
||||
|
||||
Note: If store_tokens is set to False and
|
||||
single_user_mode is set to False, all tokens will be
|
||||
ignored, since the library assumes the tokens should not
|
||||
be stored in the datastore and they should not be stored
|
||||
in the gdata_service object. This will make it
|
||||
impossible to make requests which require authorization.
|
||||
deadline: int (optional) The number of seconds to wait for a response
|
||||
before timing out on the HTTP request. If no deadline is
|
||||
specified, the default deadline for HTTP requests from App
|
||||
Engine is used. The maximum is currently 10 (for 10 seconds).
|
||||
The default deadline for App Engine is 5 seconds.
|
||||
"""
|
||||
gdata_service.http_client = AppEngineHttpClient(deadline=deadline)
|
||||
gdata_service.token_store = AppEngineTokenStore()
|
||||
gdata_service.auto_store_tokens = store_tokens
|
||||
gdata_service.auto_set_current_token = single_user_mode
|
||||
return gdata_service
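# Illustrative sketch (not part of the original module): adapting a client
# before making requests on App Engine. gdata.service.GDataService is assumed
# as the client class; any object with http_client and token_store members
# works the same way.
def _example_setup_client():
    import gdata.service
    client = gdata.service.GDataService()
    return run_on_appengine(client, store_tokens=True, single_user_mode=False,
                            deadline=10)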
|
||||
|
||||
|
||||
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
|
||||
def __init__(self, headers=None, deadline=None):
|
||||
self.debug = False
|
||||
self.headers = headers or {}
|
||||
self.deadline = deadline
|
||||
|
||||
def request(self, operation, url, data=None, headers=None):
|
||||
"""Performs an HTTP call to the server, supports GET, POST, PUT, and
|
||||
DELETE.
|
||||
|
||||
Usage example, performing an HTTP GET on http://www.google.com/:
|
||||
import atom.http
|
||||
client = atom.http.HttpClient()
|
||||
http_response = client.request('GET', 'http://www.google.com/')
|
||||
|
||||
Args:
|
||||
operation: str The HTTP operation to be performed. This is usually one
|
||||
of 'GET', 'POST', 'PUT', or 'DELETE'
|
||||
data: filestream, list of parts, or other object which can be converted
|
||||
to a string. Should be set to None when performing a GET or DELETE.
|
||||
If data is a file-like object which can be read, this method will
|
||||
read a chunk of 100K bytes at a time and send them.
|
||||
If the data is a list of parts to be sent, each part will be
|
||||
evaluated and sent.
|
||||
url: The full URL to which the request should be sent. Can be a string
|
||||
or atom.url.Url.
|
||||
headers: dict of strings. HTTP headers which should be sent
|
||||
in the request.
|
||||
"""
|
||||
all_headers = self.headers.copy()
|
||||
if headers:
|
||||
all_headers.update(headers)
|
||||
|
||||
# Construct the full payload.
|
||||
# Assume that data is None or a string.
|
||||
data_str = data
|
||||
if data:
|
||||
if isinstance(data, list):
|
||||
# If data is a list of different objects, convert them all to strings
|
||||
# and join them together.
|
||||
converted_parts = [_convert_data_part(x) for x in data]
|
||||
data_str = ''.join(converted_parts)
|
||||
else:
|
||||
data_str = _convert_data_part(data)
|
||||
|
||||
# If the list of headers does not include a Content-Length, attempt to
|
||||
# calculate it based on the data object.
|
||||
if data and 'Content-Length' not in all_headers:
|
||||
all_headers['Content-Length'] = str(len(data_str))
|
||||
|
||||
# Set the content type to the default value if none was set.
|
||||
if 'Content-Type' not in all_headers:
|
||||
all_headers['Content-Type'] = 'application/atom+xml'
|
||||
|
||||
# Lookup the urlfetch operation which corresponds to the desired HTTP verb.
|
||||
if operation == 'GET':
|
||||
method = urlfetch.GET
|
||||
elif operation == 'POST':
|
||||
method = urlfetch.POST
|
||||
elif operation == 'PUT':
|
||||
method = urlfetch.PUT
|
||||
elif operation == 'DELETE':
|
||||
method = urlfetch.DELETE
|
||||
else:
|
||||
method = None
|
||||
if self.deadline is None:
|
||||
return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
|
||||
method=method, headers=all_headers, follow_redirects=False))
|
||||
return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
|
||||
method=method, headers=all_headers, follow_redirects=False,
|
||||
deadline=self.deadline))
|
||||
|
||||
|
||||
def _convert_data_part(data):
|
||||
if not data or isinstance(data, str):
|
||||
return data
|
||||
elif hasattr(data, 'read'):
|
||||
# data is a file like object, so read it completely.
|
||||
return data.read()
|
||||
# The data object was not a file.
|
||||
# Try to convert to a string and send the data.
|
||||
return str(data)
|
||||
|
||||
|
||||
class HttpResponse(object):
|
||||
"""Translates a urlfetch resoinse to look like an hhtplib resoinse.
|
||||
|
||||
Used to allow the response from HttpRequest to be usable by gdata.service
|
||||
methods.
|
||||
"""
|
||||
|
||||
def __init__(self, urlfetch_response):
|
||||
self.body = StringIO.StringIO(urlfetch_response.content)
|
||||
self.headers = urlfetch_response.headers
|
||||
self.status = urlfetch_response.status_code
|
||||
self.reason = ''
|
||||
|
||||
def read(self, length=None):
|
||||
if not length:
|
||||
return self.body.read()
|
||||
else:
|
||||
return self.body.read(length)
|
||||
|
||||
def getheader(self, name):
|
||||
# If the exact-case header name is missing, fall back to the lower-cased form.
if not self.headers.has_key(name):
|
||||
return self.headers[name.lower()]
|
||||
return self.headers[name]
|
||||
|
||||
|
||||
class TokenCollection(db.Model):
|
||||
"""Datastore Model which associates auth tokens with the current user."""
|
||||
user = db.UserProperty()
|
||||
pickled_tokens = db.BlobProperty()
|
||||
|
||||
|
||||
class AppEngineTokenStore(atom.token_store.TokenStore):
|
||||
"""Stores the user's auth tokens in the App Engine datastore.
|
||||
|
||||
Tokens are only written to the datastore if a user is signed in (if
|
||||
users.get_current_user() returns a user object).
|
||||
"""
|
||||
def __init__(self):
|
||||
self.user = None
|
||||
|
||||
def add_token(self, token):
|
||||
"""Associates the token with the current user and stores it.
|
||||
|
||||
If there is no current user, the token will not be stored.
|
||||
|
||||
Returns:
|
||||
True if the token was stored, False otherwise.
|
||||
"""
|
||||
tokens = load_auth_tokens(self.user)
|
||||
if not hasattr(token, 'scopes') or not token.scopes:
|
||||
return False
|
||||
for scope in token.scopes:
|
||||
tokens[str(scope)] = token
|
||||
key = save_auth_tokens(tokens, self.user)
|
||||
if key:
|
||||
return True
|
||||
return False
|
||||
|
||||
def find_token(self, url):
|
||||
"""Searches the current user's collection of token for a token which can
|
||||
be used for a request to the url.
|
||||
|
||||
Returns:
|
||||
The stored token which belongs to the current user and is valid for the
|
||||
desired URL. If there is no current user, or there is no valid user
|
||||
token in the datastore, an atom.http_interface.GenericToken is returned.
|
||||
"""
|
||||
if url is None:
|
||||
return None
|
||||
if isinstance(url, (str, unicode)):
|
||||
url = atom.url.parse_url(url)
|
||||
tokens = load_auth_tokens(self.user)
|
||||
if url in tokens:
|
||||
token = tokens[url]
|
||||
if token.valid_for_scope(url):
|
||||
return token
|
||||
else:
|
||||
del tokens[url]
|
||||
save_auth_tokens(tokens, self.user)
|
||||
for scope, token in tokens.iteritems():
|
||||
if token.valid_for_scope(url):
|
||||
return token
|
||||
return atom.http_interface.GenericToken()
|
||||
|
||||
def remove_token(self, token):
|
||||
"""Removes the token from the current user's collection in the datastore.
|
||||
|
||||
Returns:
|
||||
False if the token was not removed; this could be because the token was
|
||||
not in the datastore, or because there is no current user.
|
||||
"""
|
||||
token_found = False
|
||||
scopes_to_delete = []
|
||||
tokens = load_auth_tokens(self.user)
|
||||
for scope, stored_token in tokens.iteritems():
|
||||
if stored_token == token:
|
||||
scopes_to_delete.append(scope)
|
||||
token_found = True
|
||||
for scope in scopes_to_delete:
|
||||
del tokens[scope]
|
||||
if token_found:
|
||||
save_auth_tokens(tokens, self.user)
|
||||
return token_found
|
||||
|
||||
def remove_all_tokens(self):
|
||||
"""Removes all of the current user's tokens from the datastore."""
|
||||
save_auth_tokens({}, self.user)
|
||||
|
||||
|
||||
def save_auth_tokens(token_dict, user=None):
|
||||
"""Associates the tokens with the current user and writes to the datastore.
|
||||
|
||||
If there is no current user, the tokens are not written and this function
|
||||
returns None.
|
||||
|
||||
Returns:
|
||||
The key of the datastore entity containing the user's tokens, or None if
|
||||
there was no current user.
|
||||
"""
|
||||
if user is None:
|
||||
user = users.get_current_user()
|
||||
if user is None:
|
||||
return None
|
||||
memcache.set('gdata_pickled_tokens:%s' % user, pickle.dumps(token_dict))
|
||||
user_tokens = TokenCollection.all().filter('user =', user).get()
|
||||
if user_tokens:
|
||||
user_tokens.pickled_tokens = pickle.dumps(token_dict)
|
||||
return user_tokens.put()
|
||||
else:
|
||||
user_tokens = TokenCollection(
|
||||
user=user,
|
||||
pickled_tokens=pickle.dumps(token_dict))
|
||||
return user_tokens.put()
|
||||
|
||||
|
||||
def load_auth_tokens(user=None):
|
||||
"""Reads a dictionary of the current user's tokens from the datastore.
|
||||
|
||||
If there is no current user (a user is not signed in to the app) or the user
|
||||
does not have any tokens, an empty dictionary is returned.
|
||||
"""
|
||||
if user is None:
|
||||
user = users.get_current_user()
|
||||
if user is None:
|
||||
return {}
|
||||
pickled_tokens = memcache.get('gdata_pickled_tokens:%s' % user)
|
||||
if pickled_tokens:
|
||||
return pickle.loads(pickled_tokens)
|
||||
user_tokens = TokenCollection.all().filter('user =', user).get()
|
||||
if user_tokens:
|
||||
memcache.set('gdata_pickled_tokens:%s' % user, user_tokens.pickled_tokens)
|
||||
return pickle.loads(user_tokens.pickled_tokens)
|
||||
return {}
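

# Illustrative usage sketch (not part of the original module): round-tripping
# a token through the datastore-backed store above.  ``token`` is assumed to
# be a gdata auth token object exposing a ``scopes`` list and
# valid_for_scope(), and a user must be signed in for the write to succeed.
def _example_token_roundtrip(token):
  store = AppEngineTokenStore()
  if not store.add_token(token):
    return None  # No signed-in user, or the token carries no scopes.
  return store.find_token('http://www.google.com/analytics/feeds/')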
|
||||
|
||||
223
python/gdata/analytics/__init__.py
Normal file
@@ -0,0 +1,223 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Original Copyright (C) 2006 Google Inc.
|
||||
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Note that this module will not function without specifically adding
|
||||
# 'analytics': [ #Google Analytics
|
||||
# 'https://www.google.com/analytics/feeds/'],
|
||||
# to CLIENT_LOGIN_SCOPES in the gdata/service.py file
|
||||
|
||||
"""Contains extensions to Atom objects used with Google Analytics."""
|
||||
|
||||
__author__ = 'api.suryasev (Sal Uryasev)'
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
GAN_NAMESPACE = 'http://schemas.google.com/analytics/2009'
|
||||
|
||||
class TableId(gdata.GDataEntry):
|
||||
"""tableId element."""
|
||||
_tag = 'tableId'
|
||||
_namespace = GAN_NAMESPACE
|
||||
|
||||
class Property(gdata.GDataEntry):
|
||||
_tag = 'property'
|
||||
_namespace = GAN_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
|
||||
_attributes['name'] = 'name'
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, name=None, value=None, *args, **kwargs):
|
||||
self.name = name
|
||||
self.value = value
|
||||
super(Property, self).__init__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
def __repr__(self):
|
||||
return self.value
|
||||
|
||||
class AccountListEntry(gdata.GDataEntry):
|
||||
"""The Google Documents version of an Atom Entry"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}tableId' % GAN_NAMESPACE] = ('tableId',
|
||||
[TableId])
|
||||
_children['{%s}property' % GAN_NAMESPACE] = ('property',
|
||||
[Property])
|
||||
|
||||
def __init__(self, tableId=None, property=None,
|
||||
*args, **kwargs):
|
||||
self.tableId = tableId
|
||||
self.property = property
|
||||
super(AccountListEntry, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
def AccountListEntryFromString(xml_string):
|
||||
"""Converts an XML string into an AccountListEntry object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Document List feed entry.
|
||||
|
||||
Returns:
|
||||
A AccountListEntry object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(AccountListEntry, xml_string)
|
||||
|
||||
|
||||
class AccountListFeed(gdata.GDataFeed):
|
||||
"""A feed containing a list of Google Documents Items"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
|
||||
[AccountListEntry])
|
||||
|
||||
|
||||
def AccountListFeedFromString(xml_string):
|
||||
"""Converts an XML string into an AccountListFeed object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing an AccountList feed.
|
||||
|
||||
Returns:
|
||||
An AccountListFeed object corresponding to the given XML.
|
||||
All properties are also linked to with a direct reference
|
||||
from each entry object for convenience. (e.g. entry.AccountName)
|
||||
"""
|
||||
feed = atom.CreateClassFromXMLString(AccountListFeed, xml_string)
|
||||
for entry in feed.entry:
|
||||
for pro in entry.property:
|
||||
entry.__dict__[pro.name.replace('ga:','')] = pro
|
||||
for td in entry.tableId:
|
||||
td.__dict__['value'] = td.text
|
||||
return feed
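

# Illustrative usage sketch (not part of the original module): parsing an
# Account Feed response.  ``xml_from_server`` stands for the raw XML returned
# by the accounts feed; 'ga:accountId' is one property Analytics typically
# returns, so the attribute name below is an assumption about the response.
def _example_print_accounts(xml_from_server):
  feed = AccountListFeedFromString(xml_from_server)
  for entry in feed.entry:
    # Properties are attached to each entry with the 'ga:' prefix stripped.
    print entry.accountId, entry.tableId[0].value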
|
||||
|
||||
class Dimension(gdata.GDataEntry):
|
||||
_tag = 'dimension'
|
||||
_namespace = GAN_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
|
||||
_attributes['name'] = 'name'
|
||||
_attributes['value'] = 'value'
|
||||
_attributes['type'] = 'type'
|
||||
_attributes['confidenceInterval'] = 'confidence_interval'
|
||||
|
||||
def __init__(self, name=None, value=None, type=None,
|
||||
confidence_interval = None, *args, **kwargs):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.type = type
|
||||
self.confidence_interval = confidence_interval
|
||||
super(Dimension, self).__init__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
def __repr__(self):
|
||||
return self.value
|
||||
|
||||
class Metric(gdata.GDataEntry):
|
||||
_tag = 'metric'
|
||||
_namespace = GAN_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
|
||||
_attributes['name'] = 'name'
|
||||
_attributes['value'] = 'value'
|
||||
_attributes['type'] = 'type'
|
||||
_attributes['confidenceInterval'] = 'confidence_interval'
|
||||
|
||||
def __init__(self, name=None, value=None, type=None,
|
||||
confidence_interval = None, *args, **kwargs):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.type = type
|
||||
self.confidence_interval = confidence_interval
|
||||
super(Metric, self).__init__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
def __repr__(self):
|
||||
return self.value
|
||||
|
||||
class AnalyticsDataEntry(gdata.GDataEntry):
|
||||
"""The Google Analytics version of an Atom Entry"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
|
||||
_children['{%s}dimension' % GAN_NAMESPACE] = ('dimension',
|
||||
[Dimension])
|
||||
|
||||
_children['{%s}metric' % GAN_NAMESPACE] = ('metric',
|
||||
[Metric])
|
||||
|
||||
def __init__(self, dimension=None, metric=None, *args, **kwargs):
|
||||
self.dimension = dimension
|
||||
self.metric = metric
|
||||
|
||||
super(AnalyticsDataEntry, self).__init__(*args, **kwargs)
|
||||
|
||||
class AnalyticsDataFeed(gdata.GDataFeed):
|
||||
"""A feed containing a list of Google Analytics Data Feed"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
|
||||
[AnalyticsDataEntry])
|
||||
|
||||
|
||||
"""
|
||||
Data Feed
|
||||
"""
|
||||
|
||||
def AnalyticsDataFeedFromString(xml_string):
|
||||
"""Converts an XML string into an AccountListFeed object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing an Analytics Data feed.
|
||||
|
||||
Returns:
|
||||
An AnalyticsDataFeed object corresponding to the given XML.
|
||||
Each metric and dimension is also referenced directly from
|
||||
the entry for easier access. (e.g. entry.keyword.value)
|
||||
"""
|
||||
feed = atom.CreateClassFromXMLString(AnalyticsDataFeed, xml_string)
|
||||
if feed.entry:
|
||||
for entry in feed.entry:
|
||||
for met in entry.metric:
|
||||
entry.__dict__[met.name.replace('ga:','')] = met
|
||||
if entry.dimension is not None:
|
||||
for dim in entry.dimension:
|
||||
entry.__dict__[dim.name.replace('ga:','')] = dim
|
||||
|
||||
return feed
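

# Illustrative usage sketch (not part of the original module): reading metrics
# and dimensions off a parsed data feed.  'keyword' and 'pageviews' are
# example names; the attributes that actually exist depend on the query that
# produced the feed.
def _example_print_data(xml_from_server):
  feed = AnalyticsDataFeedFromString(xml_from_server)
  for entry in feed.entry:
    print entry.keyword.value, entry.pageviews.value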
|
||||
313
python/gdata/analytics/client.py
Normal file
@@ -0,0 +1,313 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2010 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Streamlines requests to the Google Analytics APIs."""
|
||||
|
||||
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
|
||||
|
||||
|
||||
import atom.data
|
||||
import gdata.client
|
||||
import gdata.analytics.data
|
||||
import gdata.gauth
|
||||
|
||||
|
||||
class AnalyticsClient(gdata.client.GDClient):
|
||||
"""Client extension for the Google Analytics API service."""
|
||||
|
||||
api_version = '2'
|
||||
auth_service = 'analytics'
|
||||
auth_scopes = gdata.gauth.AUTH_SCOPES['analytics']
|
||||
account_type = 'GOOGLE'
|
||||
|
||||
def __init__(self, auth_token=None, **kwargs):
|
||||
"""Initializes a new client for the Google Analytics Data Export API.
|
||||
|
||||
Args:
|
||||
auth_token: gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken (optional) Authorizes this client to edit the user's data.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient
|
||||
constructor.
|
||||
"""
|
||||
|
||||
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
|
||||
|
||||
def get_account_feed(self, feed_uri, auth_token=None, **kwargs):
|
||||
"""Makes a request to the Analytics API Account Feed.
|
||||
|
||||
Args:
|
||||
feed_uri: str or gdata.analytics.AccountFeedQuery The Analytics Account
|
||||
Feed uri to define what data to retrieve from the API. Can also be
|
||||
used with a gdata.analytics.AccountFeedQuery object.
|
||||
"""
|
||||
|
||||
return self.get_feed(feed_uri,
|
||||
desired_class=gdata.analytics.data.AccountFeed,
|
||||
auth_token=auth_token,
|
||||
**kwargs)
|
||||
|
||||
GetAccountFeed = get_account_feed
|
||||
|
||||
def get_data_feed(self, feed_uri, auth_token=None, **kwargs):
|
||||
"""Makes a request to the Analytics API Data Feed.
|
||||
|
||||
Args:
|
||||
feed_uri: str or DataFeedQuery The Analytics Data
|
||||
Feed uri to define what data to retrieve from the API. Can also be
|
||||
used with a DataFeedQuery object.
|
||||
"""
|
||||
|
||||
return self.get_feed(feed_uri,
|
||||
desired_class=gdata.analytics.data.DataFeed,
|
||||
auth_token=auth_token,
|
||||
**kwargs)
|
||||
|
||||
GetDataFeed = get_data_feed
|
||||
|
||||
def get_management_feed(self, feed_uri, auth_token=None, **kwargs):
|
||||
"""Makes a request to the Google Analytics Management API.
|
||||
|
||||
The Management API provides read-only access to configuration data for
|
||||
Google Analytics and supersedes the Data Export API Account Feed.
|
||||
The Management API supports 5 feeds: account, web property, profile,
|
||||
goal, advanced segment.
|
||||
|
||||
You can access each feed through the respective management query class
|
||||
below. All requests return the same data object.
|
||||
|
||||
Args:
|
||||
feed_uri: str or AccountQuery, WebPropertyQuery,
|
||||
ProfileQuery, GoalQuery, MgmtAdvSegFeedQuery
|
||||
The Management API Feed uri to define which feed to retrieve.
|
||||
Either use a string or one of the wrapper classes.
|
||||
"""
|
||||
|
||||
return self.get_feed(feed_uri,
|
||||
desired_class=gdata.analytics.data.ManagementFeed,
|
||||
auth_token=auth_token,
|
||||
**kwargs)
|
||||
|
||||
GetMgmtFeed = GetManagementFeed = get_management_feed
|
||||
|
||||
|
||||
class AnalyticsBaseQuery(gdata.client.GDQuery):
|
||||
"""Abstracts common configuration across all query objects.
|
||||
|
||||
Attributes:
|
||||
scheme: string The default scheme. Should always be https.
|
||||
host: string The default host.
|
||||
"""
|
||||
|
||||
scheme = 'https'
|
||||
host = 'www.google.com'
|
||||
|
||||
|
||||
class AccountFeedQuery(AnalyticsBaseQuery):
|
||||
"""Account Feed query class to simplify constructing Account Feed Urls.
|
||||
|
||||
To use this class, you can either pass a dict in the constructor that has
|
||||
all the account feed query parameters as keys:
|
||||
queryUrl = AccountFeedQuery({'max-results': '10000'})
|
||||
|
||||
Alternatively you can add new parameters directly to the query object:
|
||||
queryUrl = AccountFeedQuery()
|
||||
queryUrl.query['max-results'] = '10000'
|
||||
|
||||
Args:
|
||||
query: dict (optional) Contains all the GA Account Feed query parameters
|
||||
as keys.
|
||||
"""
|
||||
|
||||
path = '/analytics/feeds/accounts/default'
|
||||
|
||||
def __init__(self, query={}, **kwargs):
|
||||
self.query = query
|
||||
gdata.client.GDQuery(self, **kwargs)
|
||||
|
||||
|
||||
class DataFeedQuery(AnalyticsBaseQuery):
|
||||
"""Data Feed query class to simplify constructing Data Feed Urls.
|
||||
|
||||
To use this class, you can either pass a dict in the constructor that has
|
||||
all the data feed query parameters as keys:
|
||||
queryUrl = DataFeedQuery({'start-date': '2008-10-01'})
|
||||
|
||||
Alternatively you can add new parameters directly to the query object:
|
||||
queryUrl = DataFeedQuery()
|
||||
queryUrl.query['start-date'] = '2008-10-01'
|
||||
|
||||
Args:
|
||||
query: dict (optional) Contains all the GA Data Feed query parameters
|
||||
as keys.
|
||||
"""
|
||||
|
||||
path = '/analytics/feeds/data'
|
||||
|
||||
def __init__(self, query={}, **kwargs):
|
||||
self.query = query
|
||||
gdata.client.GDQuery(self, **kwargs)
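

# Illustrative usage sketch (not part of the original module): fetching a data
# feed with the classes defined above.  ``token`` is assumed to be an
# already-authorized gdata.gauth token, and the ids/metrics values are only
# example strings.
def _example_fetch_data_feed(token):
  client = AnalyticsClient(auth_token=token, source='exampleCo-exampleApp-1')
  query = DataFeedQuery({'ids': 'ga:1234',
                         'start-date': '2008-10-01',
                         'end-date': '2008-10-31',
                         'metrics': 'ga:visits'})
  return client.GetDataFeed(query)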
|
||||
|
||||
|
||||
class AccountQuery(AnalyticsBaseQuery):
|
||||
"""Management API Account Feed query class.
|
||||
|
||||
Example Usage:
|
||||
queryUrl = AccountQuery()
|
||||
queryUrl = AccountQuery({'max-results': 100})
|
||||
|
||||
queryUrl2 = AccountQuery()
|
||||
queryUrl2.query['max-results'] = 100
|
||||
|
||||
Args:
|
||||
query: dict (optional) A dictionary of query parameters.
|
||||
"""
|
||||
|
||||
path = '/analytics/feeds/datasources/ga/accounts'
|
||||
|
||||
def __init__(self, query={}, **kwargs):
|
||||
self.query = query
|
||||
gdata.client.GDQuery(self, **kwargs)
|
||||
|
||||
class WebPropertyQuery(AnalyticsBaseQuery):
|
||||
"""Management API Web Property Feed query class.
|
||||
|
||||
Example Usage:
|
||||
queryUrl = WebPropertyQuery()
|
||||
queryUrl = WebPropertyQuery('123', {'max-results': 100})
|
||||
queryUrl = WebPropertyQuery(acct_id='123',
|
||||
query={'max-results': 100})
|
||||
|
||||
queryUrl2 = WebPropertyQuery()
|
||||
queryUrl2.acct_id = '1234'
|
||||
queryUrl2.query['max-results'] = 100
|
||||
|
||||
Args:
|
||||
acct_id: string (optional) The account ID to filter results.
|
||||
Default is ~all.
|
||||
query: dict (optional) A dictionary of query parameters.
|
||||
"""
|
||||
|
||||
def __init__(self, acct_id='~all', query={}, **kwargs):
|
||||
self.acct_id = acct_id
|
||||
self.query = query
|
||||
gdata.client.GDQuery(self, **kwargs)
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
"""Wrapper for path attribute."""
|
||||
return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties' %
|
||||
self.acct_id)
|
||||
|
||||
|
||||
class ProfileQuery(AnalyticsBaseQuery):
|
||||
"""Management API Profile Feed query class.
|
||||
|
||||
Example Usage:
|
||||
queryUrl = ProfileQuery()
|
||||
queryUrl = ProfileQuery('123', 'UA-123-1', {'max-results': 100})
|
||||
queryUrl = ProfileQuery(acct_id='123',
|
||||
web_prop_id='UA-123-1',
|
||||
query={'max-results': 100})
|
||||
|
||||
queryUrl2 = ProfileQuery()
|
||||
queryUrl2.acct_id = '123'
|
||||
queryUrl2.web_prop_id = 'UA-123-1'
|
||||
queryUrl2.query['max-results'] = 100
|
||||
|
||||
Args:
|
||||
acct_id: string (optional) The account ID to filter results.
|
||||
Default is ~all.
|
||||
web_prop_id: string (optional) The web property ID to filter results.
|
||||
Default is ~all.
|
||||
query: dict (optional) A dictionary of query parameters.
|
||||
"""
|
||||
|
||||
def __init__(self, acct_id='~all', web_prop_id='~all', query={}, **kwargs):
|
||||
self.acct_id = acct_id
|
||||
self.web_prop_id = web_prop_id
|
||||
self.query = query
|
||||
gdata.client.GDQuery(self, **kwargs)
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
"""Wrapper for path attribute."""
|
||||
return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties'
|
||||
'/%s/profiles' % (self.acct_id, self.web_prop_id))
|
||||
|
||||
|
||||
class GoalQuery(AnalyticsBaseQuery):
|
||||
"""Management API Goal Feed query class.
|
||||
|
||||
Example Usage:
|
||||
queryUrl = GoalQuery()
|
||||
queryUrl = GoalQuery('123', 'UA-123-1', '555',
|
||||
{'max-results': 100})
|
||||
queryUrl = GoalQuery(acct_id='123',
|
||||
web_prop_id='UA-123-1',
|
||||
profile_id='555',
|
||||
query={'max-results': 100})
|
||||
|
||||
queryUrl2 = GoalQuery()
|
||||
queryUrl2.acct_id = '123'
|
||||
queryUrl2.web_prop_id = 'UA-123-1'
|
||||
queryUrl2.query['max-results'] = 100
|
||||
|
||||
Args:
|
||||
acct_id: string (optional) The account ID to filter results.
|
||||
Default is ~all.
|
||||
web_prop_id: string (optional) The web property ID to filter results.
|
||||
Default is ~all.
|
||||
profile_id: string (optional) The profile ID to filter results.
|
||||
Default is ~all.
|
||||
query: dict (optional) A dictionary of query parameters.
|
||||
"""
|
||||
|
||||
def __init__(self, acct_id='~all', web_prop_id='~all', profile_id='~all',
|
||||
query={}, **kwargs):
|
||||
self.acct_id = acct_id
|
||||
self.web_prop_id = web_prop_id
|
||||
self.profile_id = profile_id
|
||||
self.query = query or {}
|
||||
gdata.client.GDQuery(self, **kwargs)
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
"""Wrapper for path attribute."""
|
||||
return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties'
|
||||
'/%s/profiles/%s/goals' % (self.acct_id, self.web_prop_id,
|
||||
self.profile_id))
|
||||
|
||||
|
||||
class AdvSegQuery(AnalyticsBaseQuery):
|
||||
"""Management API Goal Feed query class.
|
||||
|
||||
Example Usage:
|
||||
queryUrl = AdvSegQuery()
|
||||
queryUrl = AdvSegQuery({'max-results': 100})
|
||||
|
||||
queryUrl1 = AdvSegQuery()
|
||||
queryUrl1.query['max-results'] = 100
|
||||
|
||||
Args:
|
||||
query: dict (optional) A dictionary of query parameters.
|
||||
"""
|
||||
|
||||
path = '/analytics/feeds/datasources/ga/segments'
|
||||
|
||||
def __init__(self, query={}, **kwargs):
|
||||
self.query = query
|
||||
gdata.client.GDQuery(self, **kwargs)
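

# Illustrative usage sketch (not part of the original module): requesting the
# profiles feed through the Management API helpers above.  ``client`` is
# assumed to be an authorized AnalyticsClient, and the IDs are placeholders.
def _example_list_profiles(client):
  query = ProfileQuery(acct_id='123456', web_prop_id='UA-123456-1')
  return client.GetManagementFeed(query)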
|
||||
|
||||
365
python/gdata/analytics/data.py
Normal file
@@ -0,0 +1,365 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2010 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Data model classes for parsing and generating XML for both the
|
||||
Google Analytics Data Export and Management APIs. Although both APIs
|
||||
operate on different parts of Google Analytics, they share common XML
|
||||
elements and are released in the same module.
|
||||
|
||||
The Management API supports 5 feeds all using the same ManagementFeed
|
||||
data class.
|
||||
"""
|
||||
|
||||
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
|
||||
|
||||
|
||||
import gdata.data
|
||||
import atom.core
|
||||
import atom.data
|
||||
|
||||
|
||||
# XML Namespace used in Google Analytics API entities.
|
||||
DXP_NS = '{http://schemas.google.com/analytics/2009}%s'
|
||||
GA_NS = '{http://schemas.google.com/ga/2009}%s'
|
||||
GD_NS = '{http://schemas.google.com/g/2005}%s'
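
# For reference: these templates expand to fully-qualified element names, for
# example DXP_NS % 'metric' == '{http://schemas.google.com/analytics/2009}metric'.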
|
||||
|
||||
|
||||
class GetProperty(object):
|
||||
"""Utility class to simplify retrieving Property objects."""
|
||||
|
||||
def get_property(self, name):
|
||||
"""Helper method to return a propery object by its name attribute.
|
||||
|
||||
Args:
|
||||
name: string The name of the <dxp:property> element to retrieve.
|
||||
|
||||
Returns:
|
||||
A property object corresponding to the matching <dxp:property> element.
|
||||
If no property is found, None is returned.
|
||||
"""
|
||||
|
||||
for prop in self.property:
|
||||
if prop.name == name:
|
||||
return prop
|
||||
|
||||
return None
|
||||
|
||||
GetProperty = get_property
|
||||
|
||||
|
||||
class GetMetric(object):
|
||||
"""Utility class to simplify retrieving Metric objects."""
|
||||
|
||||
def get_metric(self, name):
|
||||
"""Helper method to return a propery value by its name attribute
|
||||
|
||||
Args:
|
||||
name: string The name of the <dxp:metric> element to retrieve.
|
||||
|
||||
Returns:
|
||||
A metric object corresponding to the matching <dxp:metric> element.
|
||||
If no metric is found, None is returned.
|
||||
"""
|
||||
|
||||
for met in self.metric:
|
||||
if met.name == name:
|
||||
return met
|
||||
|
||||
return None
|
||||
|
||||
GetMetric = get_metric
|
||||
|
||||
|
||||
class GetDimension(object):
|
||||
"""Utility class to simplify retrieving Dimension objects."""
|
||||
|
||||
def get_dimension(self, name):
|
||||
"""Helper method to return a dimention object by its name attribute
|
||||
|
||||
Args:
|
||||
name: string The name of the <dxp:dimension> element to retrieve.
|
||||
|
||||
Returns:
|
||||
A dimension object corresponding to the matching <dxp:dimension> element.
|
||||
If no dimension is found, None is returned.
|
||||
"""
|
||||
|
||||
for dim in self.dimension:
|
||||
if dim.name == name:
|
||||
return dim
|
||||
|
||||
return None
|
||||
|
||||
GetDimension = get_dimension
|
||||
|
||||
|
||||
class GaLinkFinder(object):
|
||||
"""Utility class to return specific links in Google Analytics feeds."""
|
||||
|
||||
def get_parent_links(self):
|
||||
"""Returns a list of all the parent links in an entry."""
|
||||
|
||||
links = []
|
||||
for link in self.link:
|
||||
if link.rel == link.parent():
|
||||
links.append(link)
|
||||
|
||||
return links
|
||||
|
||||
GetParentLinks = get_parent_links
|
||||
|
||||
def get_child_links(self):
|
||||
"""Returns a list of all the child links in an entry."""
|
||||
|
||||
links = []
|
||||
for link in self.link:
|
||||
if link.rel == link.child():
|
||||
links.append(link)
|
||||
|
||||
return links
|
||||
|
||||
GetChildLinks = get_child_links
|
||||
|
||||
def get_child_link(self, target_kind):
|
||||
"""Utility method to return one child link.
|
||||
|
||||
Returns:
|
||||
A child link with the given target_kind. None if the target_kind was
|
||||
not found.
|
||||
"""
|
||||
|
||||
for link in self.link:
|
||||
if link.rel == link.child() and link.target_kind == target_kind:
|
||||
return link
|
||||
|
||||
return None
|
||||
|
||||
GetChildLink = get_child_link
|
||||
|
||||
|
||||
class StartDate(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:startDate>"""
|
||||
_qname = DXP_NS % 'startDate'
|
||||
|
||||
|
||||
class EndDate(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:endDate>"""
|
||||
_qname = DXP_NS % 'endDate'
|
||||
|
||||
|
||||
class Metric(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:metric>"""
|
||||
_qname = DXP_NS % 'metric'
|
||||
name = 'name'
|
||||
type = 'type'
|
||||
value = 'value'
|
||||
confidence_interval = 'confidenceInterval'
|
||||
|
||||
|
||||
class Aggregates(atom.core.XmlElement, GetMetric):
|
||||
"""Analytics Data Feed <dxp:aggregates>"""
|
||||
_qname = DXP_NS % 'aggregates'
|
||||
metric = [Metric]
|
||||
|
||||
|
||||
class TableId(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:tableId>"""
|
||||
_qname = DXP_NS % 'tableId'
|
||||
|
||||
|
||||
class TableName(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:tableName>"""
|
||||
_qname = DXP_NS % 'tableName'
|
||||
|
||||
|
||||
class Property(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:property>"""
|
||||
_qname = DXP_NS % 'property'
|
||||
name = 'name'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class Definition(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:definition>"""
|
||||
_qname = DXP_NS % 'definition'
|
||||
|
||||
|
||||
class Segment(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:segment>"""
|
||||
_qname = DXP_NS % 'segment'
|
||||
id = 'id'
|
||||
name = 'name'
|
||||
definition = Definition
|
||||
|
||||
|
||||
class Engagement(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:engagement>"""
|
||||
_qname = GA_NS % 'engagement'
|
||||
type = 'type'
|
||||
comparison = 'comparison'
|
||||
threshold_value = 'thresholdValue'
|
||||
|
||||
|
||||
class Step(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:step>"""
|
||||
_qname = GA_NS % 'step'
|
||||
number = 'number'
|
||||
name = 'name'
|
||||
path = 'path'
|
||||
|
||||
|
||||
class Destination(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:destination>"""
|
||||
_qname = GA_NS % 'destination'
|
||||
step = [Step]
|
||||
expression = 'expression'
|
||||
case_sensitive = 'caseSensitive'
|
||||
match_type = 'matchType'
|
||||
step1_required = 'step1Required'
|
||||
|
||||
|
||||
class Goal(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:goal>"""
|
||||
_qname = GA_NS % 'goal'
|
||||
destination = Destination
|
||||
engagement = Engagement
|
||||
number = 'number'
|
||||
name = 'name'
|
||||
value = 'value'
|
||||
active = 'active'
|
||||
|
||||
|
||||
class CustomVariable(atom.core.XmlElement):
|
||||
"""Analytics Data Feed <dxp:customVariable>"""
|
||||
_qname = GA_NS % 'customVariable'
|
||||
index = 'index'
|
||||
name = 'name'
|
||||
scope = 'scope'
|
||||
|
||||
|
||||
class DataSource(atom.core.XmlElement, GetProperty):
|
||||
"""Analytics Data Feed <dxp:dataSource>"""
|
||||
_qname = DXP_NS % 'dataSource'
|
||||
table_id = TableId
|
||||
table_name = TableName
|
||||
property = [Property]
|
||||
|
||||
|
||||
class Dimension(atom.core.XmlElement):
|
||||
"""Analytics Feed <dxp:dimension>"""
|
||||
_qname = DXP_NS % 'dimension'
|
||||
name = 'name'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class AnalyticsLink(atom.data.Link):
|
||||
"""Subclass of link <link>"""
|
||||
target_kind = GD_NS % 'targetKind'
|
||||
|
||||
@classmethod
|
||||
def parent(cls):
|
||||
"""Parent target_kind"""
|
||||
return '%s#parent' % GA_NS[1:-3]
|
||||
|
||||
@classmethod
|
||||
def child(cls):
|
||||
"""Child target_kind"""
|
||||
return '%s#child' % GA_NS[1:-3]
|
||||
|
||||
|
||||
# Account Feed.
|
||||
class AccountEntry(gdata.data.GDEntry, GetProperty):
|
||||
"""Analytics Account Feed <entry>"""
|
||||
_qname = atom.data.ATOM_TEMPLATE % 'entry'
|
||||
table_id = TableId
|
||||
property = [Property]
|
||||
goal = [Goal]
|
||||
custom_variable = [CustomVariable]
|
||||
|
||||
|
||||
class AccountFeed(gdata.data.GDFeed):
|
||||
"""Analytics Account Feed <feed>"""
|
||||
_qname = atom.data.ATOM_TEMPLATE % 'feed'
|
||||
segment = [Segment]
|
||||
entry = [AccountEntry]
|
||||
|
||||
|
||||
# Data Feed.
|
||||
class DataEntry(gdata.data.GDEntry, GetMetric, GetDimension):
|
||||
"""Analytics Data Feed <entry>"""
|
||||
_qname = atom.data.ATOM_TEMPLATE % 'entry'
|
||||
dimension = [Dimension]
|
||||
metric = [Metric]
|
||||
|
||||
def get_object(self, name):
|
||||
"""Returns either a Dimension or Metric object with the same name as the
|
||||
name parameter.
|
||||
|
||||
Args:
|
||||
name: string The name of the object to retrieve.
|
||||
|
||||
Returns:
|
||||
Either a Dimension or Metric object that has the same name as the name parameter.
|
||||
"""
|
||||
|
||||
output = self.GetDimension(name)
|
||||
if not output:
|
||||
output = self.GetMetric(name)
|
||||
|
||||
return output
|
||||
|
||||
GetObject = get_object
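

# Illustrative usage sketch (not part of the original module): pulling a
# single metric out of a parsed DataEntry.  'ga:visits' is an example metric
# name; ``entry`` is assumed to be a DataEntry from a parsed DataFeed.
def _example_read_visits(entry):
  metric = entry.get_metric('ga:visits')
  if metric is not None:
    return metric.value
  return None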
|
||||
|
||||
|
||||
class DataFeed(gdata.data.GDFeed):
|
||||
"""Analytics Data Feed <feed>.
|
||||
|
||||
Although there is only one datasource, it is stored in an array to replicate
|
||||
the design of the Java client library and ensure backwards compatibility if
|
||||
new data sources are added in the future.
|
||||
"""
|
||||
|
||||
_qname = atom.data.ATOM_TEMPLATE % 'feed'
|
||||
start_date = StartDate
|
||||
end_date = EndDate
|
||||
aggregates = Aggregates
|
||||
data_source = [DataSource]
|
||||
entry = [DataEntry]
|
||||
segment = Segment
|
||||
|
||||
|
||||
# Management Feed.
|
||||
class ManagementEntry(gdata.data.GDEntry, GetProperty, GaLinkFinder):
|
||||
"""Analytics Managememt Entry <entry>."""
|
||||
|
||||
_qname = atom.data.ATOM_TEMPLATE % 'entry'
|
||||
kind = GD_NS % 'kind'
|
||||
property = [Property]
|
||||
goal = Goal
|
||||
segment = Segment
|
||||
link = [AnalyticsLink]
|
||||
|
||||
|
||||
class ManagementFeed(gdata.data.GDFeed):
|
||||
"""Analytics Management Feed <feed>.
|
||||
|
||||
This class holds the data for all 5 Management API feeds: Account,
|
||||
Web Property, Profile, Goal, and Advanced Segment Feeds.
|
||||
"""
|
||||
|
||||
_qname = atom.data.ATOM_TEMPLATE % 'feed'
|
||||
entry = [ManagementEntry]
|
||||
kind = GD_NS % 'kind'
|
||||
331
python/gdata/analytics/service.py
Normal file
@@ -0,0 +1,331 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2006 Google Inc.
|
||||
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
AccountsService extends the GDataService to streamline Google Analytics
|
||||
account information operations.
|
||||
|
||||
AnalyticsDataService: Provides methods to query google analytics data feeds.
|
||||
Extends GDataService.
|
||||
|
||||
DataQuery: Queries a Google Analytics Data list feed.
|
||||
|
||||
AccountQuery: Queries a Google Analytics Account list feed.
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'api.suryasev (Sal Uryasev)'
|
||||
|
||||
|
||||
import urllib
|
||||
import atom
|
||||
import gdata.service
|
||||
import gdata.analytics
|
||||
|
||||
|
||||
class AccountsService(gdata.service.GDataService):
|
||||
|
||||
"""Client extension for the Google Analytics Account List feed."""
|
||||
|
||||
def __init__(self, email="", password=None, source=None,
|
||||
server='www.google.com/analytics', additional_headers=None,
|
||||
**kwargs):
|
||||
"""Creates a client for the Google Analytics service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service='analytics',
|
||||
source=source, server=server, additional_headers=additional_headers,
|
||||
**kwargs)
|
||||
|
||||
def QueryAccountListFeed(self, uri):
|
||||
"""Retrieves an AccountListFeed by retrieving a URI based off the Document
|
||||
List feed, including any query parameters. An AccountQuery object
|
||||
can be used to construct these parameters.
|
||||
|
||||
Args:
|
||||
uri: string The URI of the feed being retrieved possibly with query
|
||||
parameters.
|
||||
|
||||
Returns:
|
||||
An AccountListFeed object representing the feed returned by the server.
|
||||
"""
|
||||
return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)
|
||||
|
||||
def GetAccountListEntry(self, uri):
|
||||
"""Retrieves a particular AccountListEntry by its unique URI.
|
||||
|
||||
Args:
|
||||
uri: string The unique URI of an entry in an Account List feed.
|
||||
|
||||
Returns:
|
||||
An AccountListEntry object representing the retrieved entry.
|
||||
"""
|
||||
return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)
|
||||
|
||||
def GetAccountList(self, max_results=1000, text_query=None,
|
||||
params=None, categories=None):
|
||||
"""Retrieves a feed containing all of a user's accounts and profiles."""
|
||||
q = gdata.analytics.service.AccountQuery(max_results=max_results,
|
||||
text_query=text_query,
|
||||
params=params,
|
||||
categories=categories)
|
||||
return self.QueryAccountListFeed(q.ToUri())
|
||||
|
||||
|
||||
|
||||
|
||||
class AnalyticsDataService(gdata.service.GDataService):
|
||||
|
||||
"""Client extension for the Google Analytics service Data List feed."""
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server='www.google.com/analytics', additional_headers=None,
|
||||
**kwargs):
|
||||
"""Creates a client for the Google Analytics service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'www.google.com/analytics'.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
|
||||
gdata.service.GDataService.__init__(self,
|
||||
email=email, password=password, service='analytics', source=source,
|
||||
server=server, additional_headers=additional_headers, **kwargs)
|
||||
|
||||
def GetData(self, ids='', dimensions='', metrics='',
|
||||
sort='', filters='', start_date='',
|
||||
end_date='', start_index='',
|
||||
max_results=''):
|
||||
"""Retrieves a feed containing a user's data
|
||||
|
||||
ids: comma-separated string of analytics accounts.
|
||||
dimensions: comma-separated string of dimensions.
|
||||
metrics: comma-separated string of metrics.
|
||||
sort: comma-separated string of dimensions and metrics for sorting.
|
||||
This may be prefixed with a minus to sort in reverse order.
|
||||
(e.g. '-ga:keyword')
|
||||
If omitted, the first dimension passed in will be used.
|
||||
filters: comma-separated string of filter parameters.
|
||||
(e.g. 'ga:keyword==google')
|
||||
start_date: start date for data pull.
|
||||
end_date: end date for data pull.
|
||||
start_index: used in combination with max_results to pull more than 1000
|
||||
entries. This defaults to 1.
|
||||
max_results: maximum results that the pull will return. This defaults
|
||||
to, and maxes out at 1000.
|
||||
"""
|
||||
q = gdata.analytics.service.DataQuery(ids=ids,
|
||||
dimensions=dimensions,
|
||||
metrics=metrics,
|
||||
filters=filters,
|
||||
sort=sort,
|
||||
start_date=start_date,
|
||||
end_date=end_date,
|
||||
start_index=start_index,
|
||||
max_results=max_results)
|
||||
return self.AnalyticsDataFeed(q.ToUri())
|
||||
|
||||
def AnalyticsDataFeed(self, uri):
|
||||
"""Retrieves an AnalyticsListFeed by retrieving a URI based off the
|
||||
Analytics Data feed, including any query parameters. A
|
||||
DataQuery object can be used to construct these parameters.
|
||||
|
||||
Args:
|
||||
uri: string The URI of the feed being retrieved possibly with query
|
||||
parameters.
|
||||
|
||||
Returns:
|
||||
An AnalyticsDataFeed object representing the feed returned by the
|
||||
server.
|
||||
"""
|
||||
return self.Get(uri,
|
||||
converter=gdata.analytics.AnalyticsDataFeedFromString)
|
||||
|
||||
"""
|
||||
Account Fetching
|
||||
"""
|
||||
|
||||
def QueryAccountListFeed(self, uri):
|
||||
"""Retrieves an Account ListFeed by retrieving a URI based off the Account
|
||||
List feed, including any query parameters. An AccountQuery object can
|
||||
be used to construct these parameters.
|
||||
|
||||
Args:
|
||||
uri: string The URI of the feed being retrieved possibly with query
|
||||
parameters.
|
||||
|
||||
Returns:
|
||||
An AccountListFeed object representing the feed returned by the server.
|
||||
"""
|
||||
return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)
|
||||
|
||||
def GetAccountListEntry(self, uri):
|
||||
"""Retrieves a particular AccountListEntry by its unique URI.
|
||||
|
||||
Args:
|
||||
uri: string The unique URI of an entry in an Account List feed.
|
||||
|
||||
Returns:
|
||||
An AccountListEntry object representing the retrieved entry.
|
||||
"""
|
||||
return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)
|
||||
|
||||
def GetAccountList(self, username="default", max_results=1000,
|
||||
start_index=1):
|
||||
"""Retrieves a feed containing all of a user's accounts and profiles.
|
||||
The username parameter is soon to be deprecated, with 'default'
|
||||
becoming the only allowed parameter.
|
||||
"""
|
||||
if not username:
|
||||
raise Exception("username is a required parameter")
|
||||
q = gdata.analytics.service.AccountQuery(username=username,
|
||||
max_results=max_results,
|
||||
start_index=start_index)
|
||||
return self.QueryAccountListFeed(q.ToUri())
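

# Illustrative usage sketch (not part of the original module): typical use of
# the service-style client above.  The credentials and query values are
# placeholders; ProgrammaticLogin() performs a ClientLogin request.
def _example_service_pull(email, password):
  service = AnalyticsDataService(email=email, password=password,
                                 source='exampleCo-exampleApp-1')
  service.ProgrammaticLogin()
  return service.GetData(ids='ga:1234',
                         dimensions='ga:keyword',
                         metrics='ga:visits',
                         start_date='2008-10-01',
                         end_date='2008-10-31')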
|
||||
|
||||
class DataQuery(gdata.service.Query):
|
||||
"""Object used to construct a URI to a data feed"""
|
||||
def __init__(self, feed='/feeds/data', text_query=None,
|
||||
params=None, categories=None, ids="",
|
||||
dimensions="", metrics="", sort="", filters="",
|
||||
start_date="", end_date="", start_index="",
|
||||
max_results=""):
|
||||
"""Constructor for Analytics List Query
|
||||
|
||||
Args:
|
||||
feed: string (optional) The path for the feed. (e.g. '/feeds/data')
|
||||
|
||||
text_query: string (optional) The contents of the q query parameter.
|
||||
This string is URL escaped upon conversion to a URI.
|
||||
params: dict (optional) Parameter value string pairs which become URL
|
||||
params when translated to a URI. These parameters are added to
|
||||
the query's items.
|
||||
categories: list (optional) List of category strings which should be
|
||||
included as query categories. See gdata.service.Query for
|
||||
additional documentation.
|
||||
ids: comma-separated string of analytics accounts.
|
||||
dimensions: comma-separated string of dimensions.
|
||||
metrics: comma-separated string of metrics.
|
||||
sort: comma-separated string of dimensions and metrics.
|
||||
This may be prefixed with a minus to sort in reverse order
|
||||
(e.g. '-ga:keyword').
|
||||
If omitted, the first dimension passed in will be used.
|
||||
filters: comma-separated string of filter parameters
|
||||
(e.g. 'ga:keyword==google').
|
||||
start_date: start date for data pull.
|
||||
end_date: end date for data pull.
|
||||
start_index: used in combination with max_results to pull more than 1000
|
||||
entries. This defaults to 1.
|
||||
max_results: maximum results that the pull will return. This defaults
|
||||
to, and maxes out at 1000.
|
||||
|
||||
Yields:
|
||||
A DataQuery object used to construct a URI based on the Analytics
|
||||
Data feed.
|
||||
"""
|
||||
self.elements = {'ids': ids,
|
||||
'dimensions': dimensions,
|
||||
'metrics': metrics,
|
||||
'sort': sort,
|
||||
'filters': filters,
|
||||
'start-date': start_date,
|
||||
'end-date': end_date,
|
||||
'start-index': start_index,
|
||||
'max-results': max_results}
|
||||
|
||||
gdata.service.Query.__init__(self, feed, text_query, params, categories)
|
||||
|
||||
def ToUri(self):
|
||||
"""Generates a URI from the query parameters set in the object.
|
||||
|
||||
Returns:
|
||||
A string containing the URI used to retrieve entries from the Analytics
|
||||
List feed.
|
||||
"""
|
||||
old_feed = self.feed
|
||||
self.feed = '/'.join([old_feed]) + '?' + \
|
||||
urllib.urlencode(dict([(key, value) for key, value in \
|
||||
self.elements.iteritems() if value]))
|
||||
new_feed = gdata.service.Query.ToUri(self)
|
||||
self.feed = old_feed
|
||||
return new_feed
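

# Illustrative usage sketch (not part of the original module): building a
# data-feed URI without touching the network.  The parameter values are only
# example strings.
def _example_data_query_uri():
  query = DataQuery(ids='ga:1234',
                    dimensions='ga:keyword',
                    metrics='ga:visits',
                    start_date='2008-10-01',
                    end_date='2008-10-31',
                    max_results='50')
  return query.ToUri()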
|
||||
|
||||
|
||||
class AccountQuery(gdata.service.Query):
|
||||
"""Object used to construct a URI to query the Google Account List feed"""
|
||||
def __init__(self, feed='/feeds/accounts', start_index=1,
|
||||
max_results=1000, username='default', text_query=None,
|
||||
params=None, categories=None):
|
||||
"""Constructor for Account List Query
|
||||
|
||||
Args:
|
||||
feed: string (optional) The path for the feed. (e.g. '/feeds/accounts')
|
||||
visibility: string (optional) The visibility chosen for the current
|
||||
feed.
|
||||
projection: string (optional) The projection chosen for the current
|
||||
feed.
|
||||
text_query: string (optional) The contents of the q query parameter.
|
||||
This string is URL escaped upon conversion to a URI.
|
||||
params: dict (optional) Parameter value string pairs which become URL
|
||||
params when translated to a URI. These parameters are added to
|
||||
the query's items.
|
||||
categories: list (optional) List of category strings which should be
|
||||
included as query categories. See gdata.service.Query for
|
||||
additional documentation.
|
||||
username: string (deprecated) This value should now always be passed as
|
||||
'default'.
|
||||
|
||||
Yields:
|
||||
An AccountQuery object used to construct a URI based on the Account
|
||||
List feed.
|
||||
"""
|
||||
self.max_results = max_results
|
||||
self.start_index = start_index
|
||||
self.username = username
|
||||
gdata.service.Query.__init__(self, feed, text_query, params, categories)
|
||||
|
||||
def ToUri(self):
|
||||
"""Generates a URI from the query parameters set in the object.
|
||||
|
||||
Returns:
|
||||
A string containing the URI used to retrieve entries from the Account
|
||||
List feed.
|
||||
"""
|
||||
old_feed = self.feed
|
||||
self.feed = '/'.join([old_feed, self.username]) + '?' + \
|
||||
'&'.join(['max-results=' + str(self.max_results),
|
||||
'start-index=' + str(self.start_index)])
|
||||
new_feed = self.feed
|
||||
self.feed = old_feed
|
||||
return new_feed
|
||||
526
python/gdata/apps/__init__.py
Normal file
@@ -0,0 +1,526 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2007 SIOS Technology, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains objects used with Google Apps."""
|
||||
|
||||
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
# XML namespaces which are often used in Google Apps entity.
|
||||
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
|
||||
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
|
||||
|
||||
|
||||
class EmailList(atom.AtomBase):
|
||||
"""The Google Apps EmailList element"""
|
||||
|
||||
_tag = 'emailList'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['name'] = 'name'
|
||||
|
||||
def __init__(self, name=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.name = name
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
def EmailListFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(EmailList, xml_string)
|
||||
|
||||
|
||||
class Who(atom.AtomBase):
|
||||
"""The Google Apps Who element"""
|
||||
|
||||
_tag = 'who'
|
||||
_namespace = gdata.GDATA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['email'] = 'email'
|
||||
|
||||
def __init__(self, rel=None, email=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.rel = rel
|
||||
self.email = email
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
def WhoFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Who, xml_string)
|
||||
|
||||
|
||||
class Login(atom.AtomBase):
|
||||
"""The Google Apps Login element"""
|
||||
|
||||
_tag = 'login'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['userName'] = 'user_name'
|
||||
_attributes['password'] = 'password'
|
||||
_attributes['suspended'] = 'suspended'
|
||||
_attributes['admin'] = 'admin'
|
||||
_attributes['changePasswordAtNextLogin'] = 'change_password'
|
||||
_attributes['agreedToTerms'] = 'agreed_to_terms'
|
||||
_attributes['ipWhitelisted'] = 'ip_whitelisted'
|
||||
_attributes['hashFunctionName'] = 'hash_function_name'
|
||||
|
||||
def __init__(self, user_name=None, password=None, suspended=None,
|
||||
ip_whitelisted=None, hash_function_name=None,
|
||||
admin=None, change_password=None, agreed_to_terms=None,
|
||||
extension_elements=None, extension_attributes=None,
|
||||
text=None):
|
||||
self.user_name = user_name
|
||||
self.password = password
|
||||
self.suspended = suspended
|
||||
self.admin = admin
|
||||
self.change_password = change_password
|
||||
self.agreed_to_terms = agreed_to_terms
|
||||
self.ip_whitelisted = ip_whitelisted
|
||||
self.hash_function_name = hash_function_name
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def LoginFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Login, xml_string)
|
||||
|
||||
|
||||
class Quota(atom.AtomBase):
|
||||
"""The Google Apps Quota element"""
|
||||
|
||||
_tag = 'quota'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['limit'] = 'limit'
|
||||
|
||||
def __init__(self, limit=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.limit = limit
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def QuotaFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Quota, xml_string)
|
||||
|
||||
|
||||
class Name(atom.AtomBase):
|
||||
"""The Google Apps Name element"""
|
||||
|
||||
_tag = 'name'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['familyName'] = 'family_name'
|
||||
_attributes['givenName'] = 'given_name'
|
||||
|
||||
def __init__(self, family_name=None, given_name=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
self.family_name = family_name
|
||||
self.given_name = given_name
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def NameFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Name, xml_string)
|
||||
|
||||
|
||||
class Nickname(atom.AtomBase):
|
||||
"""The Google Apps Nickname element"""
|
||||
|
||||
_tag = 'nickname'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['name'] = 'name'
|
||||
|
||||
def __init__(self, name=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
self.name = name
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def NicknameFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Nickname, xml_string)
|
||||
|
||||
|
||||
class NicknameEntry(gdata.GDataEntry):
|
||||
"""A Google Apps flavor of an Atom Entry for Nickname"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
|
||||
_children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname)
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None,
|
||||
login=None, nickname=None,
|
||||
extended_property=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
|
||||
gdata.GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content,
|
||||
atom_id=atom_id, link=link, published=published,
|
||||
title=title, updated=updated)
|
||||
self.login = login
|
||||
self.nickname = nickname
|
||||
self.extended_property = extended_property or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def NicknameEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(NicknameEntry, xml_string)
|
||||
|
||||
|
||||
class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder):
|
||||
"""A Google Apps Nickname feed flavor of an Atom Feed"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry])
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None,
|
||||
entry=None, total_results=None, start_index=None,
|
||||
items_per_page=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
gdata.GDataFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results,
|
||||
start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def NicknameFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(NicknameFeed, xml_string)
|
||||
|
||||
|
||||
class UserEntry(gdata.GDataEntry):
|
||||
"""A Google Apps flavor of an Atom Entry"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
|
||||
_children['{%s}name' % APPS_NAMESPACE] = ('name', Name)
|
||||
_children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota)
|
||||
# This child may already be defined in GDataEntry, confirm before removing.
|
||||
_children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
|
||||
[gdata.FeedLink])
|
||||
_children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None,
|
||||
login=None, name=None, quota=None, who=None, feed_link=None,
|
||||
extended_property=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
|
||||
gdata.GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content,
|
||||
atom_id=atom_id, link=link, published=published,
|
||||
title=title, updated=updated)
|
||||
self.login = login
|
||||
self.name = name
|
||||
self.quota = quota
|
||||
self.who = who
|
||||
self.feed_link = feed_link or []
|
||||
self.extended_property = extended_property or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def UserEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(UserEntry, xml_string)
|
||||
|
||||
|
||||
class UserFeed(gdata.GDataFeed, gdata.LinkFinder):
|
||||
"""A Google Apps User feed flavor of an Atom Feed"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry])
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None,
|
||||
entry=None, total_results=None, start_index=None,
|
||||
items_per_page=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
gdata.GDataFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results,
|
||||
start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def UserFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(UserFeed, xml_string)
|
||||
|
||||
|
||||
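A minimal usage sketch for the provisioning element classes above. The XML is a hypothetical, stripped-down fragment (real feeds carry id, updated, link elements and more), and it assumes these classes live in gdata/apps/__init__.py with Login exposing the userName attribute as user_name, as the rest of this commit suggests.

import gdata.apps

# Hypothetical minimal feed; a real Apps provisioning response is richer.
USER_FEED_XML = (
    "<feed xmlns='http://www.w3.org/2005/Atom' "
    "xmlns:apps='http://schemas.google.com/apps/2006'>"
    "<entry><apps:login userName='jdoe'/></entry>"
    "</feed>")

feed = gdata.apps.UserFeedFromString(USER_FEED_XML)
for user in feed.entry:
  # 'user_name' is the assumed Python name for the userName attribute.
  print user.login.user_name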
class EmailListEntry(gdata.GDataEntry):
|
||||
"""A Google Apps EmailList flavor of an Atom Entry"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList)
|
||||
# Might be able to remove this _children entry.
|
||||
_children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
|
||||
[gdata.FeedLink])
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None,
|
||||
email_list=None, feed_link=None,
|
||||
extended_property=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
|
||||
gdata.GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content,
|
||||
atom_id=atom_id, link=link, published=published,
|
||||
title=title, updated=updated)
|
||||
self.email_list = email_list
|
||||
self.feed_link = feed_link or []
|
||||
self.extended_property = extended_property or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def EmailListEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(EmailListEntry, xml_string)
|
||||
|
||||
|
||||
class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder):
|
||||
"""A Google Apps EmailList feed flavor of an Atom Feed"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry])
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None,
|
||||
entry=None, total_results=None, start_index=None,
|
||||
items_per_page=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
gdata.GDataFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results,
|
||||
start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def EmailListFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(EmailListFeed, xml_string)
|
||||
|
||||
|
||||
class EmailListRecipientEntry(gdata.GDataEntry):
|
||||
"""A Google Apps EmailListRecipient flavor of an Atom Entry"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None,
|
||||
who=None,
|
||||
extended_property=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
|
||||
gdata.GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content,
|
||||
atom_id=atom_id, link=link, published=published,
|
||||
title=title, updated=updated)
|
||||
self.who = who
|
||||
self.extended_property = extended_property or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def EmailListRecipientEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string)
|
||||
|
||||
|
||||
class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder):
|
||||
"""A Google Apps EmailListRecipient feed flavor of an Atom Feed"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
|
||||
[EmailListRecipientEntry])
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None,
|
||||
entry=None, total_results=None, start_index=None,
|
||||
items_per_page=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
gdata.GDataFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results,
|
||||
start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def EmailListRecipientFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string)
|
||||
|
||||
|
||||
class Property(atom.AtomBase):
|
||||
"""The Google Apps Property element"""
|
||||
|
||||
_tag = 'property'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['name'] = 'name'
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, name=None, value=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def PropertyFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Property, xml_string)
|
||||
|
||||
|
||||
class PropertyEntry(gdata.GDataEntry):
|
||||
"""A Google Apps Property flavor of an Atom Entry"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}property' % APPS_NAMESPACE] = ('property', [Property])
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None,
|
||||
property=None,
|
||||
extended_property=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
|
||||
gdata.GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content,
|
||||
atom_id=atom_id, link=link, published=published,
|
||||
title=title, updated=updated)
|
||||
self.property = property
|
||||
self.extended_property = extended_property or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def PropertyEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(PropertyEntry, xml_string)
|
||||
|
||||
class PropertyFeed(gdata.GDataFeed, gdata.LinkFinder):
|
||||
"""A Google Apps Property feed flavor of an Atom Feed"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PropertyEntry])
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None,
|
||||
entry=None, total_results=None, start_index=None,
|
||||
items_per_page=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
gdata.GDataFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results,
|
||||
start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
def PropertyFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(PropertyFeed, xml_string)
|
||||
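A short sketch of how the Property element pairs a name with a value and how PropertyEntry carries a list of them; ToString() is assumed to be inherited from atom.AtomBase, and the property name shown is only an example.

import gdata.apps

prop = gdata.apps.Property(name='organizationName', value='Example, Inc.')
# Serializes to an <apps:property name='...' value='...'/> element.
print prop.ToString()

entry = gdata.apps.PropertyEntry(property=[prop])
print len(entry.property)  # 1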
16
python/gdata/apps/adminsettings/__init__.py
Normal file
@@ -0,0 +1,16 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
471
python/gdata/apps/adminsettings/service.py
Normal file
@@ -0,0 +1,471 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Allow Google Apps domain administrators to set domain admin settings.
|
||||
|
||||
AdminSettingsService: Set admin settings."""
|
||||
|
||||
__author__ = 'jlee@pbu.edu'
|
||||
|
||||
|
||||
import gdata.apps
|
||||
import gdata.apps.service
|
||||
import gdata.service
|
||||
|
||||
|
||||
API_VER='2.0'
|
||||
|
||||
class AdminSettingsService(gdata.apps.service.PropertyService):
|
||||
"""Client for the Google Apps Admin Settings service."""
|
||||
|
||||
def _serviceUrl(self, setting_id, domain=None):
|
||||
if domain is None:
|
||||
domain = self.domain
|
||||
return '/a/feeds/domain/%s/%s/%s' % (API_VER, domain, setting_id)
|
||||
|
||||
def genericGet(self, location):
|
||||
"""Generic HTTP Get Wrapper
|
||||
|
||||
Args:
|
||||
location: relative uri to Get
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the get operation."""
|
||||
|
||||
uri = self._serviceUrl(location)
|
||||
try:
|
||||
return self._GetProperties(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def GetDefaultLanguage(self):
|
||||
"""Gets Domain Default Language
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns:
|
||||
Default Language as a string. All possible values are listed at:
|
||||
http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags"""
|
||||
|
||||
result = self.genericGet('general/defaultLanguage')
|
||||
return result['defaultLanguage']
|
||||
|
||||
def UpdateDefaultLanguage(self, defaultLanguage):
|
||||
"""Updates Domain Default Language
|
||||
|
||||
Args:
|
||||
defaultLanguage: Domain Language to set
|
||||
possible values are at:
|
||||
http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the put operation"""
|
||||
|
||||
uri = self._serviceUrl('general/defaultLanguage')
|
||||
properties = {'defaultLanguage': defaultLanguage}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def GetOrganizationName(self):
|
||||
"""Gets Domain Default Language
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns:
|
||||
Organization Name as a string."""
|
||||
|
||||
result = self.genericGet('general/organizationName')
|
||||
return result['organizationName']
|
||||
|
||||
|
||||
def UpdateOrganizationName(self, organizationName):
|
||||
"""Updates Organization Name
|
||||
|
||||
Args:
|
||||
organizationName: Name of organization
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the put operation"""
|
||||
|
||||
uri = self._serviceUrl('general/organizationName')
|
||||
properties = {'organizationName': organizationName}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def GetMaximumNumberOfUsers(self):
|
||||
"""Gets Maximum Number of Users Allowed
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: An integer, the maximum number of users"""
|
||||
|
||||
result = self.genericGet('general/maximumNumberOfUsers')
|
||||
return int(result['maximumNumberOfUsers'])
|
||||
|
||||
def GetCurrentNumberOfUsers(self):
|
||||
"""Gets Current Number of Users
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: An integer, the current number of users"""
|
||||
|
||||
result = self.genericGet('general/currentNumberOfUsers')
|
||||
return int(result['currentNumberOfUsers'])
|
||||
|
||||
def IsDomainVerified(self):
|
||||
"""Is the domain verified
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: Boolean, is domain verified"""
|
||||
|
||||
result = self.genericGet('accountInformation/isVerified')
|
||||
if result['isVerified'] == 'true':
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def GetSupportPIN(self):
|
||||
"""Gets Support PIN
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A string, the Support PIN"""
|
||||
|
||||
result = self.genericGet('accountInformation/supportPIN')
|
||||
return result['supportPIN']
|
||||
|
||||
def GetEdition(self):
|
||||
"""Gets Google Apps Domain Edition
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A string, the domain's edition (premier, education, partner)"""
|
||||
|
||||
result = self.genericGet('accountInformation/edition')
|
||||
return result['edition']
|
||||
|
||||
def GetCustomerPIN(self):
|
||||
"""Gets Customer PIN
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A string, the customer PIN"""
|
||||
|
||||
result = self.genericGet('accountInformation/customerPIN')
|
||||
return result['customerPIN']
|
||||
|
||||
def GetCreationTime(self):
|
||||
"""Gets Domain Creation Time
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A string, the domain's creation time"""
|
||||
|
||||
result = self.genericGet('accountInformation/creationTime')
|
||||
return result['creationTime']
|
||||
|
||||
def GetCountryCode(self):
|
||||
"""Gets Domain Country Code
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A string, the domain's country code. Possible values at:
|
||||
http://www.iso.org/iso/country_codes/iso_3166_code_lists/english_country_names_and_code_elements.htm"""
|
||||
|
||||
result = self.genericGet('accountInformation/countryCode')
|
||||
return result['countryCode']
|
||||
|
||||
def GetAdminSecondaryEmail(self):
|
||||
"""Gets Domain Admin Secondary Email Address
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A string, the secondary email address for domain admin"""
|
||||
|
||||
result = self.genericGet('accountInformation/adminSecondaryEmail')
|
||||
return result['adminSecondaryEmail']
|
||||
|
||||
def UpdateAdminSecondaryEmail(self, adminSecondaryEmail):
|
||||
"""Gets Domain Creation Time
|
||||
|
||||
Args:
|
||||
adminSecondaryEmail: string, secondary email address of admin
|
||||
|
||||
Returns: A dict containing the result of the put operation"""
|
||||
|
||||
uri = self._serviceUrl('accountInformation/adminSecondaryEmail')
|
||||
properties = {'adminSecondaryEmail': adminSecondaryEmail}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def GetDomainLogo(self):
|
||||
"""Gets Domain Logo
|
||||
|
||||
This function does not make use of the Google Apps Admin Settings API,
|
||||
it does an HTTP GET of a URL specific to the Google Apps domain. It is
|
||||
included for completeness' sake.
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: binary image file"""
|
||||
|
||||
import urllib
|
||||
url = 'http://www.google.com/a/cpanel/'+self.domain+'/images/logo.gif'
|
||||
response = urllib.urlopen(url)
|
||||
return response.read()
|
||||
|
||||
def UpdateDomainLogo(self, logoImage):
|
||||
"""Update Domain's Custom Logo
|
||||
|
||||
Args:
|
||||
logoImage: binary image data
|
||||
|
||||
Returns: A dict containing the result of the put operation"""
|
||||
|
||||
from base64 import b64encode
|
||||
uri = self._serviceUrl('appearance/customLogo')
|
||||
properties = {'logoImage': b64encode(logoImage)}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def GetCNAMEVerificationStatus(self):
|
||||
"""Gets Domain CNAME Verification Status
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A dict {recordName, verified, verifiedMethod}"""
|
||||
|
||||
return self.genericGet('verification/cname')
|
||||
|
||||
def UpdateCNAMEVerificationStatus(self, verified):
|
||||
"""Updates CNAME Verification Status
|
||||
|
||||
Args:
|
||||
verified: boolean, True will retry verification process
|
||||
|
||||
Returns: A dict containing the result of the put operation"""
|
||||
|
||||
uri = self._serviceUrl('verification/cname')
|
||||
properties = self.GetCNAMEVerificationStatus()
|
||||
properties['verified'] = verified
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def GetMXVerificationStatus(self):
|
||||
"""Gets Domain MX Verification Status
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A dict {verified, verifiedMethod}"""
|
||||
|
||||
return self.genericGet('verification/mx')
|
||||
|
||||
def UpdateMXVerificationStatus(self, verified):
|
||||
"""Updates MX Verification Status
|
||||
|
||||
Args:
|
||||
verified: boolean, True will retry verification process
|
||||
|
||||
Returns: A dict containing the result of the put operation"""
|
||||
|
||||
uri = self._serviceUrl('verification/mx')
|
||||
properties = self.GetMXVerificationStatus()
|
||||
properties['verified'] = verified
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def GetSSOSettings(self):
|
||||
"""Gets Domain Single Sign-On Settings
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A dict {samlSignonUri, samlLogoutUri, changePasswordUri, enableSSO, ssoWhitelist, useDomainSpecificIssuer}"""
|
||||
|
||||
return self.genericGet('sso/general')
|
||||
|
||||
def UpdateSSOSettings(self, enableSSO=None, samlSignonUri=None,
|
||||
samlLogoutUri=None, changePasswordUri=None,
|
||||
ssoWhitelist=None, useDomainSpecificIssuer=None):
|
||||
"""Update SSO Settings.
|
||||
|
||||
Args:
|
||||
enableSSO: boolean, SSO Master on/off switch
|
||||
samlSignonUri: string, SSO Login Page
|
||||
samlLogoutUri: string, SSO Logout Page
|
||||
changePasswordUri: string, SSO Password Change Page
|
||||
ssoWhitelist: string, Range of IP Addresses which will see SSO
|
||||
useDomainSpecificIssuer: boolean, Include Google Apps Domain in Issuer
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('sso/general')
|
||||
|
||||
#Get current settings, replace Nones with ''
|
||||
properties = self.GetSSOSettings()
|
||||
if properties['samlSignonUri'] == None:
|
||||
properties['samlSignonUri'] = ''
|
||||
if properties['samlLogoutUri'] == None:
|
||||
properties['samlLogoutUri'] = ''
|
||||
if properties['changePasswordUri'] == None:
|
||||
properties['changePasswordUri'] = ''
|
||||
if properties['ssoWhitelist'] == None:
|
||||
properties['ssoWhitelist'] = ''
|
||||
|
||||
#update only the values we were passed
|
||||
if enableSSO != None:
|
||||
properties['enableSSO'] = gdata.apps.service._bool2str(enableSSO)
|
||||
if samlSignonUri != None:
|
||||
properties['samlSignonUri'] = samlSignonUri
|
||||
if samlLogoutUri != None:
|
||||
properties['samlLogoutUri'] = samlLogoutUri
|
||||
if changePasswordUri != None:
|
||||
properties['changePasswordUri'] = changePasswordUri
|
||||
if ssoWhitelist != None:
|
||||
properties['ssoWhitelist'] = ssoWhitelist
|
||||
if useDomainSpecificIssuer != None:
|
||||
properties['useDomainSpecificIssuer'] = gdata.apps.service._bool2str(useDomainSpecificIssuer)
|
||||
|
||||
return self._PutProperties(uri, properties)
|
||||
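For illustration, a partial update that changes only two SSO properties; the method re-sends the remaining values as fetched. The snippet assumes 'service' is an authenticated AdminSettingsService instance and the URI is a placeholder.

# 'service' is assumed to be an authenticated AdminSettingsService.
service.UpdateSSOSettings(samlSignonUri='https://sso.example.com/login',
                          useDomainSpecificIssuer=True)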
|
||||
def GetSSOKey(self):
|
||||
"""Gets Domain Single Sign-On Signing Key
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns: A dict {modulus, exponent, algorithm, format}"""
|
||||
|
||||
return self.genericGet('sso/signingkey')
|
||||
|
||||
def UpdateSSOKey(self, signingKey):
|
||||
"""Update SSO Settings.
|
||||
|
||||
Args:
|
||||
signingKey: string, public key to be uploaded
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation."""
|
||||
|
||||
uri = self._serviceUrl('sso/signingkey')
|
||||
properties = {'signingKey': signingKey}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def IsUserMigrationEnabled(self):
|
||||
"""Is User Migration Enabled
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns:
|
||||
boolean, is user migration enabled"""
|
||||
|
||||
result = self.genericGet('email/migration')
|
||||
if result['enableUserMigration'] == 'true':
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def UpdateUserMigrationStatus(self, enableUserMigration):
|
||||
"""Update User Migration Status
|
||||
|
||||
Args:
|
||||
enableUserMigration: boolean, user migration enable/disable
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation."""
|
||||
|
||||
uri = self._serviceUrl('email/migration')
|
||||
properties = {'enableUserMigration': enableUserMigration}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def GetOutboundGatewaySettings(self):
|
||||
"""Get Outbound Gateway Settings
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns:
|
||||
A dict {smartHost, smtpMode}"""
|
||||
|
||||
uri = self._serviceUrl('email/gateway')
|
||||
try:
|
||||
return self._GetProperties(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
except TypeError:
|
||||
#if no outbound gateway is set, we get a TypeError,
|
||||
#catch it and return a dict of None values
|
||||
return {'smartHost': None, 'smtpMode': None}
|
||||
|
||||
def UpdateOutboundGatewaySettings(self, smartHost=None, smtpMode=None):
|
||||
"""Update Outbound Gateway Settings
|
||||
|
||||
Args:
|
||||
smartHost: string, ip address or hostname of outbound gateway
|
||||
smtpMode: string, SMTP or SMTP_TLS
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation."""
|
||||
|
||||
uri = self._serviceUrl('email/gateway')
|
||||
|
||||
#Get current settings, replace Nones with ''
|
||||
properties = self.GetOutboundGatewaySettings()
|
||||
if properties['smartHost'] == None:
|
||||
properties['smartHost'] = ''
|
||||
if properties['smtpMode'] == None:
|
||||
properties['smtpMode'] = ''
|
||||
|
||||
#If we were passed new values for smartHost or smtpMode, update them
|
||||
if smartHost != None:
|
||||
properties['smartHost'] = smartHost
|
||||
if smtpMode != None:
|
||||
properties['smtpMode'] = smtpMode
|
||||
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def AddEmailRoute(self, routeDestination, routeRewriteTo, routeEnabled, bounceNotifications, accountHandling):
|
||||
"""Adds Domain Email Route
|
||||
|
||||
Args:
|
||||
routeDestination: string, destination ip address or hostname
|
||||
routeRewriteTo: boolean, rewrite the SMTP envelope To: header
|
||||
routeEnabled: boolean, enable/disable email routing
|
||||
bounceNotifications: boolean, send bounce notifications to the sender
|
||||
accountHandling: string, which to route, "allAccounts", "provisionedAccounts", "unknownAccounts"
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation."""
|
||||
|
||||
uri = self._serviceUrl('emailrouting')
|
||||
properties = {}
|
||||
properties['routeDestination'] = routeDestination
|
||||
properties['routeRewriteTo'] = gdata.apps.service._bool2str(routeRewriteTo)
|
||||
properties['routeEnabled'] = gdata.apps.service._bool2str(routeEnabled)
|
||||
properties['bounceNotifications'] = gdata.apps.service._bool2str(bounceNotifications)
|
||||
properties['accountHandling'] = accountHandling
|
||||
return self._PostProperties(uri, properties)
|
||||
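A brief end-to-end sketch of the class above. The credentials and domain are placeholders, and the constructor signature (email/password/domain) is assumed to be inherited from gdata.apps.service.PropertyService.

import gdata.apps.adminsettings.service

service = gdata.apps.adminsettings.service.AdminSettingsService(
    email='admin@example.com', password='secret', domain='example.com')
service.ProgrammaticLogin()

print service.GetOrganizationName()
print service.GetMaximumNumberOfUsers()
if not service.IsDomainVerified():
  print service.GetCNAMEVerificationStatus()
service.UpdateDefaultLanguage('en-US')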
1
python/gdata/apps/audit/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
277
python/gdata/apps/audit/service.py
Normal file
@@ -0,0 +1,277 @@
|
||||
# Copyright (C) 2008 Google, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Allow Google Apps domain administrators to audit user data.
|
||||
|
||||
AuditService: Set auditing."""
|
||||
|
||||
__author__ = 'jlee@pbu.edu'
|
||||
|
||||
from base64 import b64encode
|
||||
|
||||
import gdata.apps
|
||||
import gdata.apps.service
|
||||
import gdata.service
|
||||
|
||||
class AuditService(gdata.apps.service.PropertyService):
|
||||
"""Client for the Google Apps Audit service."""
|
||||
|
||||
def _serviceUrl(self, setting_id, domain=None, user=None):
|
||||
if domain is None:
|
||||
domain = self.domain
|
||||
if user is None:
|
||||
return '/a/feeds/compliance/audit/%s/%s' % (setting_id, domain)
|
||||
else:
|
||||
return '/a/feeds/compliance/audit/%s/%s/%s' % (setting_id, domain, user)
|
||||
|
||||
def updatePGPKey(self, pgpkey):
|
||||
"""Updates Public PGP Key Google uses to encrypt audit data
|
||||
|
||||
Args:
|
||||
pgpkey: string, ASCII text of PGP Public Key to be used
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the POST operation."""
|
||||
|
||||
uri = self._serviceUrl('publickey')
|
||||
b64pgpkey = b64encode(pgpkey)
|
||||
properties = {}
|
||||
properties['publicKey'] = b64pgpkey
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def createEmailMonitor(self, source_user, destination_user, end_date,
|
||||
begin_date=None, incoming_headers_only=False,
|
||||
outgoing_headers_only=False, drafts=False,
|
||||
drafts_headers_only=False, chats=False,
|
||||
chats_headers_only=False):
|
||||
"""Creates a email monitor, forwarding the source_users emails/chats
|
||||
|
||||
Args:
|
||||
source_user: string, the user whose email will be audited
|
||||
destination_user: string, the user to receive the audited email
|
||||
end_date: string, the date the audit will end in
|
||||
"yyyy-MM-dd HH:mm" format, required
|
||||
begin_date: string, the date the audit will start in
|
||||
"yyyy-MM-dd HH:mm" format, leave blank to use current time
|
||||
incoming_headers_only: boolean, whether to audit only the headers of
|
||||
mail delivered to source user
|
||||
outgoing_headers_only: boolean, whether to audit only the headers of
|
||||
mail sent from the source user
|
||||
drafts: boolean, whether to audit draft messages of the source user
|
||||
drafts_headers_only: boolean, whether to audit only the headers of
|
||||
mail drafts saved by the user
|
||||
chats: boolean, whether to audit archived chats of the source user
|
||||
chats_headers_only: boolean, whether to audit only the headers of
|
||||
archived chats of the source user
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the POST operation."""
|
||||
|
||||
uri = self._serviceUrl('mail/monitor', user=source_user)
|
||||
properties = {}
|
||||
properties['destUserName'] = destination_user
|
||||
if begin_date is not None:
|
||||
properties['beginDate'] = begin_date
|
||||
properties['endDate'] = end_date
|
||||
if incoming_headers_only:
|
||||
properties['incomingEmailMonitorLevel'] = 'HEADER_ONLY'
|
||||
else:
|
||||
properties['incomingEmailMonitorLevel'] = 'FULL_MESSAGE'
|
||||
if outgoing_headers_only:
|
||||
properties['outgoingEmailMonitorLevel'] = 'HEADER_ONLY'
|
||||
else:
|
||||
properties['outgoingEmailMonitorLevel'] = 'FULL_MESSAGE'
|
||||
if drafts:
|
||||
if drafts_headers_only:
|
||||
properties['draftMonitorLevel'] = 'HEADER_ONLY'
|
||||
else:
|
||||
properties['draftMonitorLevel'] = 'FULL_MESSAGE'
|
||||
if chats:
|
||||
if chats_headers_only:
|
||||
properties['chatMonitorLevel'] = 'HEADER_ONLY'
|
||||
else:
|
||||
properties['chatMonitorLevel'] = 'FULL_MESSAGE'
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def getEmailMonitors(self, user):
|
||||
""""Gets the email monitors for the given user
|
||||
|
||||
Args:
|
||||
user: string, the user to retrieve email monitors for
|
||||
|
||||
Returns:
|
||||
A list containing the results of the GET operation.
|
||||
|
||||
"""
|
||||
uri = self._serviceUrl('mail/monitor', user=user)
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def deleteEmailMonitor(self, source_user, destination_user):
|
||||
"""Deletes the email monitor for the given user
|
||||
|
||||
Args:
|
||||
source_user: string, the user who is being monitored
|
||||
destination_user: string, the user who receives the monitored emails
|
||||
|
||||
Returns:
|
||||
Nothing
|
||||
"""
|
||||
|
||||
uri = self._serviceUrl('mail/monitor', user=source_user+'/'+destination_user)
|
||||
try:
|
||||
return self._DeleteProperties(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def createAccountInformationRequest(self, user):
|
||||
"""Creates a request for account auditing details
|
||||
|
||||
Args:
|
||||
user: string, the user to request account information for
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the post operation."""
|
||||
|
||||
uri = self._serviceUrl('account', user=user)
|
||||
properties = {}
|
||||
#XML Body is left empty
|
||||
try:
|
||||
return self._PostProperties(uri, properties)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def getAccountInformationRequestStatus(self, user, request_id):
|
||||
"""Gets the status of an account auditing request
|
||||
|
||||
Args:
|
||||
user: string, the user whose account auditing details were requested
|
||||
request_id: string, the request_id
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the get operation."""
|
||||
|
||||
uri = self._serviceUrl('account', user=user+'/'+request_id)
|
||||
try:
|
||||
return self._GetProperties(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def getAllAccountInformationRequestsStatus(self):
|
||||
"""Gets the status of all account auditing requests for the domain
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns:
|
||||
A list containing the results of the GET operation.
|
||||
"""
|
||||
|
||||
uri = self._serviceUrl('account')
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
|
||||
def deleteAccountInformationRequest(self, user, request_id):
|
||||
"""Deletes the request for account auditing information
|
||||
|
||||
Args:
|
||||
user: string, the user whose account auditing details were requested
|
||||
request_id: string, the request_id
|
||||
|
||||
Returns:
|
||||
Nothing
|
||||
"""
|
||||
|
||||
uri = self._serviceUrl('account', user=user+'/'+request_id)
|
||||
try:
|
||||
return self._DeleteProperties(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def createMailboxExportRequest(self, user, begin_date=None, end_date=None, include_deleted=False, search_query=None, headers_only=False):
|
||||
"""Creates a mailbox export request
|
||||
|
||||
Args:
|
||||
user: string, the user whose mailbox export is being requested
|
||||
begin_date: string, date of earliest emails to export, optional, defaults to date of account creation
|
||||
format is 'yyyy-MM-dd HH:mm'
|
||||
end_date: string, date of latest emails to export, optional, defaults to current date
|
||||
format is 'yyyy-MM-dd HH:mm'
|
||||
include_deleted: boolean, whether to include deleted emails in export, mutually exclusive with search_query
|
||||
search_query: string, gmail style search query, matched emails will be exported, mutually exclusive with include_deleted
headers_only: boolean, whether to export only the headers of the matched emails
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the post operation."""
|
||||
|
||||
uri = self._serviceUrl('mail/export', user=user)
|
||||
properties = {}
|
||||
if begin_date is not None:
|
||||
properties['beginDate'] = begin_date
|
||||
if end_date is not None:
|
||||
properties['endDate'] = end_date
|
||||
if include_deleted is not None:
|
||||
properties['includeDeleted'] = gdata.apps.service._bool2str(include_deleted)
|
||||
if search_query is not None:
|
||||
properties['searchQuery'] = search_query
|
||||
if headers_only is True:
|
||||
properties['packageContent'] = 'HEADER_ONLY'
|
||||
else:
|
||||
properties['packageContent'] = 'FULL_MESSAGE'
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def getMailboxExportRequestStatus(self, user, request_id):
|
||||
"""Gets the status of an mailbox export request
|
||||
|
||||
Args:
|
||||
user: string, the user whose mailbox export was requested
|
||||
request_id: string, the request_id
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the get operation."""
|
||||
|
||||
uri = self._serviceUrl('mail/export', user=user+'/'+request_id)
|
||||
try:
|
||||
return self._GetProperties(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def getAllMailboxExportRequestsStatus(self):
|
||||
"""Gets the status of all mailbox export requests for the domain
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns:
|
||||
A list containing the results of the GET operation.
|
||||
"""
|
||||
|
||||
uri = self._serviceUrl('mail/export')
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
|
||||
def deleteMailboxExportRequest(self, user, request_id):
|
||||
"""Deletes the request for mailbox export
|
||||
|
||||
Args:
|
||||
user: string, the user whose mailbox export was requested
|
||||
request_id: string, the request_id
|
||||
|
||||
Returns:
|
||||
Nothing
|
||||
"""
|
||||
|
||||
uri = self._serviceUrl('mail/export', user=user+'/'+request_id)
|
||||
try:
|
||||
return self._DeleteProperties(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
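A brief usage sketch for AuditService. The credentials and domain are placeholders, and the 'requestId' and 'status' keys below are assumptions about the property dict the API returns.

import gdata.apps.audit.service

audit = gdata.apps.audit.service.AuditService(
    email='admin@example.com', password='secret', domain='example.com')
audit.ProgrammaticLogin()

# Ask for a headers-only export of one user's mailbox, then poll its status.
request = audit.createMailboxExportRequest('jdoe', headers_only=True)
status = audit.getMailboxExportRequestStatus('jdoe', request['requestId'])
print status['status']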
15
python/gdata/apps/emailsettings/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
400
python/gdata/apps/emailsettings/client.py
Normal file
@@ -0,0 +1,400 @@
|
||||
#!/usr/bin/python2.4
|
||||
#
|
||||
# Copyright 2010 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""EmailSettingsClient simplifies Email Settings API calls.
|
||||
|
||||
EmailSettingsClient extends gdata.client.GDClient to ease interaction with
|
||||
the Google Apps Email Settings API. These interactions include the ability
|
||||
to create labels, filters, aliases, and update web-clip, forwarding, POP,
|
||||
IMAP, vacation-responder, signature, language, and general settings.
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'Claudio Cherubino <ccherubino@google.com>'
|
||||
|
||||
|
||||
import gdata.apps.emailsettings.data
|
||||
import gdata.client
|
||||
|
||||
|
||||
# Email Settings URI template
|
||||
# The strings in this template are eventually replaced with the API version,
|
||||
# Google Apps domain name, username, and settingID, respectively.
|
||||
EMAIL_SETTINGS_URI_TEMPLATE = '/a/feeds/emailsettings/%s/%s/%s/%s'
|
||||
|
||||
|
||||
# The settingID value for the label requests
|
||||
SETTING_ID_LABEL = 'label'
|
||||
# The settingID value for the filter requests
|
||||
SETTING_ID_FILTER = 'filter'
|
||||
# The settingID value for the send-as requests
|
||||
SETTING_ID_SENDAS = 'sendas'
|
||||
# The settingID value for the webclip requests
|
||||
SETTING_ID_WEBCLIP = 'webclip'
|
||||
# The settingID value for the forwarding requests
|
||||
SETTING_ID_FORWARDING = 'forwarding'
|
||||
# The settingID value for the POP requests
|
||||
SETTING_ID_POP = 'pop'
|
||||
# The settingID value for the IMAP requests
|
||||
SETTING_ID_IMAP = 'imap'
|
||||
# The settingID value for the vacation responder requests
|
||||
SETTING_ID_VACATION_RESPONDER = 'vacation'
|
||||
# The settingID value for the signature requests
|
||||
SETTING_ID_SIGNATURE = 'signature'
|
||||
# The settingID value for the language requests
|
||||
SETTING_ID_LANGUAGE = 'language'
|
||||
# The settingID value for the general requests
|
||||
SETTING_ID_GENERAL = 'general'
|
||||
|
||||
# The KEEP action for the email settings
|
||||
ACTION_KEEP = 'KEEP'
|
||||
# The ARCHIVE action for the email settings
|
||||
ACTION_ARCHIVE = 'ARCHIVE'
|
||||
# The DELETE action for the email settings
|
||||
ACTION_DELETE = 'DELETE'
|
||||
|
||||
# The ALL_MAIL setting for POP enable_for property
|
||||
POP_ENABLE_FOR_ALL_MAIL = 'ALL_MAIL'
|
||||
# The MAIL_FROM_NOW_ON setting for POP enable_for property
|
||||
POP_ENABLE_FOR_MAIL_FROM_NOW_ON = 'MAIL_FROM_NOW_ON'
|
||||
|
||||
|
||||
class EmailSettingsClient(gdata.client.GDClient):
|
||||
"""Client extension for the Google Email Settings API service.
|
||||
|
||||
Attributes:
|
||||
host: string The hostname for the Email Settings API service.
|
||||
api_version: string The version of the Email Settings API.
|
||||
"""
|
||||
|
||||
host = 'apps-apis.google.com'
|
||||
api_version = '2.0'
|
||||
auth_service = 'apps'
|
||||
auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
|
||||
ssl = True
|
||||
|
||||
def __init__(self, domain, auth_token=None, **kwargs):
|
||||
"""Constructs a new client for the Email Settings API.
|
||||
|
||||
Args:
|
||||
domain: string The Google Apps domain with Email Settings.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the email settings.
|
||||
kwargs: The other parameters to pass to the gdata.client.GDClient
|
||||
constructor.
|
||||
"""
|
||||
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
|
||||
self.domain = domain
|
||||
|
||||
def make_email_settings_uri(self, username, setting_id):
|
||||
"""Creates the URI for the Email Settings API call.
|
||||
|
||||
Using this client's Google Apps domain, create the URI to set up
|
||||
email settings for the given user in that domain.
|
||||
|
||||
|
||||
Args:
|
||||
username: string The name of the user affected by this setting.
|
||||
setting_id: string The key of the setting to be configured.
|
||||
|
||||
Returns:
|
||||
A string giving the URI for Email Settings API calls for this client's
|
||||
Google Apps domain.
|
||||
"""
|
||||
uri = EMAIL_SETTINGS_URI_TEMPLATE % (self.api_version, self.domain,
|
||||
username, setting_id)
|
||||
return uri
|
||||
|
||||
MakeEmailSettingsUri = make_email_settings_uri
|
||||
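For example, with the template and constants above, a label request for user 'jdoe' on the domain 'example.com' resolves to the path shown in the comment; the domain and username are placeholders.

client = EmailSettingsClient(domain='example.com')
print client.make_email_settings_uri('jdoe', SETTING_ID_LABEL)
# /a/feeds/emailsettings/2.0/example.com/jdoe/label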
|
||||
def create_label(self, username, name, **kwargs):
|
||||
"""Creates a label with the given properties.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
name: string The name of the label.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient.post().
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsLabel of the new resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_LABEL)
|
||||
new_label = gdata.apps.emailsettings.data.EmailSettingsLabel(
|
||||
uri=uri, name=name)
|
||||
return self.post(new_label, uri, **kwargs)
|
||||
|
||||
CreateLabel = create_label
|
||||
|
||||
def create_filter(self, username, from_address=None,
|
||||
to_address=None, subject=None, has_the_word=None,
|
||||
does_not_have_the_word=None, has_attachments=None,
|
||||
label=None, mark_as_read=None, archive=None, **kwargs):
|
||||
"""Creates a filter with the given properties.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
from_address: string The source email address for the filter.
|
||||
to_address: string (optional) The destination email address for
|
||||
the filter.
|
||||
subject: string (optional) The value the email must have in its
|
||||
subject to be filtered.
|
||||
has_the_word: string (optional) The value the email must have
|
||||
in its subject or body to be filtered.
|
||||
does_not_have_the_word: string (optional) The value the email
|
||||
cannot have in its subject or body to be filtered.
|
||||
has_attachments: string (optional) A boolean string representing
|
||||
whether the email must have an attachment to be filtered.
|
||||
label: string (optional) The name of the label to apply to
|
||||
messages matching the filter criteria.
|
||||
mark_as_read: Boolean (optional) Whether or not to mark
|
||||
messages matching the filter criteria as read.
|
||||
archive: Boolean (optional) Whether or not to move messages
|
||||
matching the filter criteria to the Archived state.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient.post().
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsFilter of the new resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_FILTER)
|
||||
new_filter = gdata.apps.emailsettings.data.EmailSettingsFilter(
|
||||
uri=uri, from_address=from_address,
|
||||
to_address=to_address, subject=subject,
|
||||
has_the_word=has_the_word,
|
||||
does_not_have_the_word=does_not_have_the_word,
|
||||
has_attachments=has_attachments, label=label,
|
||||
mark_as_read=mark_as_read, archive=archive)
|
||||
return self.post(new_filter, uri, **kwargs)
|
||||
|
||||
CreateFilter = create_filter
|
||||
|
||||
def create_send_as(self, username, name, address, reply_to=None,
|
||||
make_default=None, **kwargs):
|
||||
"""Creates a send-as alias with the given properties.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
name: string The name that will appear in the "From" field.
|
||||
address: string The email address that appears as the
|
||||
origination address for emails sent by this user.
|
||||
reply_to: string (optional) The address to be used as the reply-to
|
||||
address in email sent using the alias.
|
||||
make_default: Boolean (optional) Whether or not this alias should
|
||||
become the default alias for this user.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient.post().
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsSendAsAlias of the
|
||||
new resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_SENDAS)
|
||||
new_alias = gdata.apps.emailsettings.data.EmailSettingsSendAsAlias(
|
||||
uri=uri, name=name, address=address,
|
||||
reply_to=reply_to, make_default=make_default)
|
||||
return self.post(new_alias, uri, **kwargs)
|
||||
|
||||
CreateSendAs = create_send_as
|
||||
|
||||
def update_webclip(self, username, enable, **kwargs):
|
||||
"""Enable/Disable Google Mail web clip.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
enable: Boolean Whether to enable showing Web clips.
|
||||
kwargs: The other parameters to pass to the update method.
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsWebClip of the
|
||||
updated resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_WEBCLIP)
|
||||
new_webclip = gdata.apps.emailsettings.data.EmailSettingsWebClip(
|
||||
uri=uri, enable=enable)
|
||||
return self.update(new_webclip, **kwargs)
|
||||
|
||||
UpdateWebclip = update_webclip
|
||||
|
||||
def update_forwarding(self, username, enable, forward_to=None,
|
||||
action=None, **kwargs):
|
||||
"""Update Google Mail Forwarding settings.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
enable: Boolean Whether to enable incoming email forwarding.
|
||||
forward_to: (optional) string The address email will be forwarded to.
|
||||
action: string (optional) The action to perform after forwarding
|
||||
an email (ACTION_KEEP, ACTION_ARCHIVE, ACTION_DELETE).
|
||||
kwargs: The other parameters to pass to the update method.
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsForwarding of the
|
||||
updated resource
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_FORWARDING)
|
||||
new_forwarding = gdata.apps.emailsettings.data.EmailSettingsForwarding(
|
||||
uri=uri, enable=enable, forward_to=forward_to, action=action)
|
||||
return self.update(new_forwarding, **kwargs)
|
||||
|
||||
UpdateForwarding = update_forwarding
|
||||
|
||||
def update_pop(self, username, enable, enable_for=None, action=None,
|
||||
**kwargs):
|
||||
"""Update Google Mail POP settings.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
enable: Boolean Whether to enable incoming POP3 access.
|
||||
enable_for: string (optional) Whether to enable POP3 for all mail
|
||||
(POP_ENABLE_FOR_ALL_MAIL), or mail from now on
|
||||
(POP_ENABLE_FOR_MAIL_FROM_NOW_ON).
|
||||
action: string (optional) What Google Mail should do with its copy
|
||||
of the email after it is retrieved using POP (ACTION_KEEP,
|
||||
ACTION_ARCHIVE, ACTION_DELETE).
|
||||
kwargs: The other parameters to pass to the update method.
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsPop of the updated resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_POP)
|
||||
new_pop = gdata.apps.emailsettings.data.EmailSettingsPop(
|
||||
uri=uri, enable=enable,
|
||||
enable_for=enable_for, action=action)
|
||||
return self.update(new_pop, **kwargs)
|
||||
|
||||
UpdatePop = update_pop
|
||||
|
||||
def update_imap(self, username, enable, **kwargs):
|
||||
"""Update Google Mail IMAP settings.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
enable: Boolean Whether to enable IMAP access.
|
||||
kwargs: The other parameters to pass to the update method.
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsImap of the updated resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_IMAP)
|
||||
new_imap = gdata.apps.emailsettings.data.EmailSettingsImap(
|
||||
uri=uri, enable=enable)
|
||||
return self.update(new_imap, **kwargs)
|
||||
|
||||
UpdateImap = update_imap
|
||||
|
||||
def update_vacation(self, username, enable, subject=None, message=None,
|
||||
contacts_only=None, **kwargs):
|
||||
"""Update Google Mail vacation-responder settings.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
enable: Boolean Whether to enable the vacation responder.
|
||||
subject: string (optional) The subject line of the vacation responder
|
||||
autoresponse.
|
||||
message: string (optional) The message body of the vacation responder
|
||||
autoresponse.
|
||||
contacts_only: Boolean (optional) Whether to only send autoresponses
|
||||
to known contacts.
|
||||
kwargs: The other parameters to pass to the update method.
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsVacationResponder of the
|
||||
updated resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_VACATION_RESPONDER)
|
||||
new_vacation = gdata.apps.emailsettings.data.EmailSettingsVacationResponder(
|
||||
uri=uri, enable=enable, subject=subject,
|
||||
message=message, contacts_only=contacts_only)
|
||||
return self.update(new_vacation, **kwargs)
|
||||
|
||||
UpdateVacation = update_vacation
|
||||
|
||||
def update_signature(self, username, signature, **kwargs):
|
||||
"""Update Google Mail signature.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
signature: string The signature to be appended to outgoing messages.
|
||||
kwargs: The other parameters to pass to the update method.
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsSignature of the
|
||||
updated resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_SIGNATURE)
|
||||
new_signature = gdata.apps.emailsettings.data.EmailSettingsSignature(
|
||||
uri=uri, signature=signature)
|
||||
return self.update(new_signature, **kwargs)
|
||||
|
||||
UpdateSignature = update_signature
|
||||
|
||||
def update_language(self, username, language, **kwargs):
|
||||
"""Update Google Mail language settings.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
language: string The language tag for Google Mail's display language.
|
||||
kwargs: The other parameters to pass to the update method.
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsLanguage of the
|
||||
updated resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_LANGUAGE)
|
||||
new_language = gdata.apps.emailsettings.data.EmailSettingsLanguage(
|
||||
uri=uri, language=language)
|
||||
return self.update(new_language, **kwargs)
|
||||
|
||||
UpdateLanguage = update_language
|
||||
|
||||
def update_general_settings(self, username, page_size=None, shortcuts=None,
|
||||
arrows=None, snippets=None, use_unicode=None,
|
||||
**kwargs):
|
||||
"""Update Google Mail general settings.
|
||||
|
||||
Args:
|
||||
username: string The name of the user.
|
||||
page_size: int (optional) The number of conversations to be shown per
|
||||
page.
|
||||
shortcuts: Boolean (optional) Whether to enable keyboard shortcuts.
|
||||
arrows: Boolean (optional) Whether to display arrow-shaped personal
|
||||
indicators next to email sent specifically to the user.
|
||||
snippets: Boolean (optional) Whether to display snippets of the messages
|
||||
in the inbox and when searching.
|
||||
use_unicode: Boolean (optional) Whether to use UTF-8 (unicode) encoding
|
||||
for all outgoing messages.
|
||||
kwargs: The other parameters to pass to the update method.
|
||||
|
||||
Returns:
|
||||
gdata.apps.emailsettings.data.EmailSettingsGeneral of the
|
||||
updated resource.
|
||||
"""
|
||||
uri = self.MakeEmailSettingsUri(username=username,
|
||||
setting_id=SETTING_ID_GENERAL)
|
||||
new_general = gdata.apps.emailsettings.data.EmailSettingsGeneral(
|
||||
uri=uri, page_size=page_size, shortcuts=shortcuts,
|
||||
arrows=arrows, snippets=snippets, use_unicode=use_unicode)
|
||||
return self.update(new_general, **kwargs)
|
||||
|
||||
UpdateGeneralSettings = update_general_settings
|
||||
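For orientation, a minimal usage sketch of the EmailSettingsClient methods shown above. The client construction and ClientLogin call, as well as the domain, credentials and username, are illustrative assumptions rather than part of this change:

# Sketch only: assumes an EmailSettingsClient authorized as a domain administrator.
import gdata.apps.emailsettings.client

client = gdata.apps.emailsettings.client.EmailSettingsClient(domain='example.com')
client.ClientLogin(email='admin@example.com', password='secret',
                   source='emailsettings-sample')  # assumed auth flow

# Enable a vacation responder, then set a signature and a few general settings.
client.update_vacation(username='liz', enable=True, subject='Out of office',
                       message='Back on Monday.', contacts_only=True)
client.update_signature(username='liz', signature='Liz, Example Inc.')
client.update_general_settings(username='liz', page_size=50, shortcuts=True)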
1130
python/gdata/apps/emailsettings/data.py
Normal file
File diff suppressed because it is too large
264
python/gdata/apps/emailsettings/service.py
Normal file
@@ -0,0 +1,264 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Allow Google Apps domain administrators to set users' email settings.
|
||||
|
||||
EmailSettingsService: Set various email settings.
|
||||
"""
|
||||
|
||||
__author__ = 'google-apps-apis@googlegroups.com'
|
||||
|
||||
|
||||
import gdata.apps
|
||||
import gdata.apps.service
|
||||
import gdata.service
|
||||
|
||||
|
||||
API_VER='2.0'
|
||||
# Forwarding and POP3 options
|
||||
KEEP='KEEP'
|
||||
ARCHIVE='ARCHIVE'
|
||||
DELETE='DELETE'
|
||||
ALL_MAIL='ALL_MAIL'
|
||||
MAIL_FROM_NOW_ON='MAIL_FROM_NOW_ON'
|
||||
|
||||
|
||||
class EmailSettingsService(gdata.apps.service.PropertyService):
|
||||
"""Client for the Google Apps Email Settings service."""
|
||||
|
||||
def _serviceUrl(self, setting_id, username, domain=None):
|
||||
if domain is None:
|
||||
domain = self.domain
|
||||
return '/a/feeds/emailsettings/%s/%s/%s/%s' % (API_VER, domain, username,
|
||||
setting_id)
|
||||
|
||||
def CreateLabel(self, username, label):
|
||||
"""Create a label.
|
||||
|
||||
Args:
|
||||
username: User to create label for.
|
||||
label: Label to create.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the create operation.
|
||||
"""
|
||||
uri = self._serviceUrl('label', username)
|
||||
properties = {'label': label}
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def CreateFilter(self, username, from_=None, to=None, subject=None,
|
||||
has_the_word=None, does_not_have_the_word=None,
|
||||
has_attachment=None, label=None, should_mark_as_read=None,
|
||||
should_archive=None):
|
||||
"""Create a filter.
|
||||
|
||||
Args:
|
||||
username: User to create filter for.
|
||||
from_: Filter from string.
|
||||
to: Filter to string.
|
||||
subject: Filter subject.
|
||||
has_the_word: Words to filter in.
|
||||
does_not_have_the_word: Words to filter out.
|
||||
has_attachment: Boolean for message having attachment.
|
||||
label: Label to apply.
|
||||
should_mark_as_read: Boolean for marking message as read.
|
||||
should_archive: Boolean for archiving message.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the create operation.
|
||||
"""
|
||||
uri = self._serviceUrl('filter', username)
|
||||
properties = {}
|
||||
properties['from'] = from_
|
||||
properties['to'] = to
|
||||
properties['subject'] = subject
|
||||
properties['hasTheWord'] = has_the_word
|
||||
properties['doesNotHaveTheWord'] = does_not_have_the_word
|
||||
properties['hasAttachment'] = gdata.apps.service._bool2str(has_attachment)
|
||||
properties['label'] = label
|
||||
properties['shouldMarkAsRead'] = gdata.apps.service._bool2str(should_mark_as_read)
|
||||
properties['shouldArchive'] = gdata.apps.service._bool2str(should_archive)
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def CreateSendAsAlias(self, username, name, address, reply_to=None,
|
||||
make_default=None):
|
||||
"""Create alias to send mail as.
|
||||
|
||||
Args:
|
||||
username: User to create alias for.
|
||||
name: Name of alias.
|
||||
address: Email address to send from.
|
||||
reply_to: Email address to reply to.
|
||||
make_default: Boolean for whether this is the new default sending alias.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the create operation.
|
||||
"""
|
||||
uri = self._serviceUrl('sendas', username)
|
||||
properties = {}
|
||||
properties['name'] = name
|
||||
properties['address'] = address
|
||||
properties['replyTo'] = reply_to
|
||||
properties['makeDefault'] = gdata.apps.service._bool2str(make_default)
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def UpdateWebClipSettings(self, username, enable):
|
||||
"""Update WebClip Settings
|
||||
|
||||
Args:
|
||||
username: User to update Web Clip settings for.
|
||||
enable: Boolean whether to enable Web Clip.
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('webclip', username)
|
||||
properties = {}
|
||||
properties['enable'] = gdata.apps.service._bool2str(enable)
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def UpdateForwarding(self, username, enable, forward_to=None, action=None):
|
||||
"""Update forwarding settings.
|
||||
|
||||
Args:
|
||||
username: User to update forwarding for.
|
||||
enable: Boolean whether to enable this forwarding rule.
|
||||
forward_to: Email address to forward to.
|
||||
action: Action to take after forwarding.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('forwarding', username)
|
||||
properties = {}
|
||||
properties['enable'] = gdata.apps.service._bool2str(enable)
|
||||
if enable is True:
|
||||
properties['forwardTo'] = forward_to
|
||||
properties['action'] = action
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def UpdatePop(self, username, enable, enable_for=None, action=None):
|
||||
"""Update POP3 settings.
|
||||
|
||||
Args:
|
||||
username: User to update POP3 settings for.
|
||||
enable: Boolean whether to enable POP3.
|
||||
enable_for: Which messages to make available via POP3.
|
||||
action: Action to take after user retrieves email via POP3.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('pop', username)
|
||||
properties = {}
|
||||
properties['enable'] = gdata.apps.service._bool2str(enable)
|
||||
if enable is True:
|
||||
properties['enableFor'] = enable_for
|
||||
properties['action'] = action
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def UpdateImap(self, username, enable):
|
||||
"""Update IMAP settings.
|
||||
|
||||
Args:
|
||||
username: User to update IMAP settings for.
|
||||
enable: Boolean whether to enable IMAP.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('imap', username)
|
||||
properties = {'enable': gdata.apps.service._bool2str(enable)}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def UpdateVacation(self, username, enable, subject=None, message=None,
|
||||
contacts_only=None):
|
||||
"""Update vacation settings.
|
||||
|
||||
Args:
|
||||
username: User to update vacation settings for.
|
||||
enable: Boolean whether to enable vacation responses.
|
||||
subject: Vacation message subject.
|
||||
message: Vacation message body.
|
||||
contacts_only: Boolean whether to send message only to contacts.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('vacation', username)
|
||||
properties = {}
|
||||
properties['enable'] = gdata.apps.service._bool2str(enable)
|
||||
if enable is True:
|
||||
properties['subject'] = subject
|
||||
properties['message'] = message
|
||||
properties['contactsOnly'] = gdata.apps.service._bool2str(contacts_only)
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def UpdateSignature(self, username, signature):
|
||||
"""Update signature.
|
||||
|
||||
Args:
|
||||
username: User to update signature for.
|
||||
signature: Signature string.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('signature', username)
|
||||
properties = {'signature': signature}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def UpdateLanguage(self, username, language):
|
||||
"""Update user interface language.
|
||||
|
||||
Args:
|
||||
username: User to update language for.
|
||||
language: Language code.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('language', username)
|
||||
properties = {'language': language}
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None,
|
||||
snippets=None, unicode=None):
|
||||
"""Update general settings.
|
||||
|
||||
Args:
|
||||
username: User to update general settings for.
|
||||
page_size: Number of messages to show.
|
||||
shortcuts: Boolean whether shortcuts are enabled.
|
||||
arrows: Boolean whether arrows are enabled.
|
||||
snippets: Boolean whether snippets are enabled.
|
||||
unicode: Boolean whether unicode is enabled.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._serviceUrl('general', username)
|
||||
properties = {}
|
||||
if page_size != None:
|
||||
properties['pageSize'] = str(page_size)
|
||||
if shortcuts != None:
|
||||
properties['shortcuts'] = gdata.apps.service._bool2str(shortcuts)
|
||||
if arrows != None:
|
||||
properties['arrows'] = gdata.apps.service._bool2str(arrows)
|
||||
if snippets != None:
|
||||
properties['snippets'] = gdata.apps.service._bool2str(snippets)
|
||||
if unicode != None:
|
||||
properties['unicode'] = gdata.apps.service._bool2str(unicode)
|
||||
return self._PutProperties(uri, properties)
|
||||
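A short usage sketch of the EmailSettingsService methods above (CreateLabel, CreateFilter, UpdateVacation). The credentials, domain and property values are placeholders, and the ProgrammaticLogin call is an assumption about the usual gdata.service auth flow:

# Sketch only: assumes domain-administrator credentials.
import gdata.apps.emailsettings.service

service = gdata.apps.emailsettings.service.EmailSettingsService(
    email='admin@example.com', password='secret', domain='example.com',
    source='emailsettings-sample')
service.ProgrammaticLogin()  # assumed auth flow

service.CreateLabel('liz', 'Receipts')
service.CreateFilter('liz', from_='billing@example.com',
                     label='Receipts', should_archive=True)
service.UpdateVacation('liz', enable=True, subject='Out of office',
                       message='Back on Monday.', contacts_only=True)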
0
python/gdata/apps/groups/__init__.py
Normal file
387
python/gdata/apps/groups/service.py
Normal file
@@ -0,0 +1,387 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Allow Google Apps domain administrators to manage groups, group members and group owners.
|
||||
|
||||
GroupsService: Provides methods to manage groups, members and owners.
|
||||
"""
|
||||
|
||||
__author__ = 'google-apps-apis@googlegroups.com'
|
||||
|
||||
|
||||
import urllib
|
||||
import gdata.apps
|
||||
import gdata.apps.service
|
||||
import gdata.service
|
||||
|
||||
|
||||
API_VER = '2.0'
|
||||
BASE_URL = '/a/feeds/group/' + API_VER + '/%s'
|
||||
GROUP_MEMBER_URL = BASE_URL + '?member=%s'
|
||||
GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s'
|
||||
GROUP_ID_URL = BASE_URL + '/%s'
|
||||
MEMBER_URL = BASE_URL + '/%s/member'
|
||||
MEMBER_WITH_SUSPENDED_URL = MEMBER_URL + '?includeSuspendedUsers=%s'
|
||||
MEMBER_ID_URL = MEMBER_URL + '/%s'
|
||||
OWNER_URL = BASE_URL + '/%s/owner'
|
||||
OWNER_WITH_SUSPENDED_URL = OWNER_URL + '?includeSuspendedUsers=%s'
|
||||
OWNER_ID_URL = OWNER_URL + '/%s'
|
||||
|
||||
PERMISSION_OWNER = 'Owner'
|
||||
PERMISSION_MEMBER = 'Member'
|
||||
PERMISSION_DOMAIN = 'Domain'
|
||||
PERMISSION_ANYONE = 'Anyone'
|
||||
|
||||
|
||||
class GroupsService(gdata.apps.service.PropertyService):
|
||||
"""Client for the Google Apps Groups service."""
|
||||
|
||||
def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email,
|
||||
direct_only=False, domain=None, suspended_users=False):
|
||||
if domain is None:
|
||||
domain = self.domain
|
||||
|
||||
if service_type == 'group':
|
||||
if group_id != '' and is_existed:
|
||||
return GROUP_ID_URL % (domain, group_id)
|
||||
elif member_id != '':
|
||||
if direct_only:
|
||||
return GROUP_MEMBER_DIRECT_URL % (domain, urllib.quote_plus(member_id),
|
||||
self._Bool2Str(direct_only))
|
||||
else:
|
||||
return GROUP_MEMBER_URL % (domain, urllib.quote_plus(member_id))
|
||||
else:
|
||||
return BASE_URL % (domain)
|
||||
|
||||
if service_type == 'member':
|
||||
if member_id != '' and is_existed:
|
||||
return MEMBER_ID_URL % (domain, group_id, urllib.quote_plus(member_id))
|
||||
elif suspended_users:
|
||||
return MEMBER_WITH_SUSPENDED_URL % (domain, group_id,
|
||||
self._Bool2Str(suspended_users))
|
||||
else:
|
||||
return MEMBER_URL % (domain, group_id)
|
||||
|
||||
if service_type == 'owner':
|
||||
if owner_email != '' and is_existed:
|
||||
return OWNER_ID_URL % (domain, group_id, urllib.quote_plus(owner_email))
|
||||
elif suspended_users:
|
||||
return OWNER_WITH_SUSPENDED_URL % (domain, group_id,
|
||||
self._Bool2Str(suspended_users))
|
||||
else:
|
||||
return OWNER_URL % (domain, group_id)
|
||||
|
||||
def _Bool2Str(self, b):
|
||||
if b is None:
|
||||
return None
|
||||
return str(b is True).lower()
|
||||
|
||||
def _IsExisted(self, uri):
|
||||
try:
|
||||
self._GetProperties(uri)
|
||||
return True
|
||||
except gdata.apps.service.AppsForYourDomainException, e:
|
||||
if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST:
|
||||
return False
|
||||
else:
|
||||
raise e
|
||||
|
||||
def CreateGroup(self, group_id, group_name, description, email_permission):
|
||||
"""Create a group.
|
||||
|
||||
Args:
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
group_name: The name of the group.
|
||||
description: A description of the group
|
||||
email_permission: The subscription permission of the group.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the create operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('group', False, group_id, '', '')
|
||||
properties = {}
|
||||
properties['groupId'] = group_id
|
||||
properties['groupName'] = group_name
|
||||
properties['description'] = description
|
||||
properties['emailPermission'] = email_permission
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def UpdateGroup(self, group_id, group_name, description, email_permission):
|
||||
"""Update a group's name, description and/or permission.
|
||||
|
||||
Args:
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
group_name: The name of the group.
|
||||
description: A description of the group
|
||||
email_permission: The subscription permission of the group.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('group', True, group_id, '', '')
|
||||
properties = {}
|
||||
properties['groupId'] = group_id
|
||||
properties['groupName'] = group_name
|
||||
properties['description'] = description
|
||||
properties['emailPermission'] = email_permission
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def RetrieveGroup(self, group_id):
|
||||
"""Retrieve a group based on its ID.
|
||||
|
||||
Args:
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('group', True, group_id, '', '')
|
||||
return self._GetProperties(uri)
|
||||
|
||||
def RetrieveAllGroups(self):
|
||||
"""Retrieve all groups in the domain.
|
||||
|
||||
Args:
|
||||
None
|
||||
|
||||
Returns:
|
||||
A list containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('group', True, '', '', '')
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def RetrievePageOfGroups(self, start_group=None):
|
||||
"""Retrieve one page of groups in the domain.
|
||||
|
||||
Args:
|
||||
start_group: The key to continue for pagination through all groups.
|
||||
|
||||
Returns:
|
||||
A feed object containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('group', True, '', '', '')
|
||||
if start_group is not None:
|
||||
uri += "?start="+start_group
|
||||
property_feed = self._GetPropertyFeed(uri)
|
||||
return property_feed
|
||||
|
||||
def RetrieveGroups(self, member_id, direct_only=False):
|
||||
"""Retrieve all groups that belong to the given member_id.
|
||||
|
||||
Args:
|
||||
member_id: The member's email address (e.g. member@example.com).
|
||||
direct_only: Boolean whether to return only the groups that this member directly belongs to.
|
||||
|
||||
Returns:
|
||||
A list containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('group', True, '', member_id, '', direct_only=direct_only)
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def DeleteGroup(self, group_id):
|
||||
"""Delete a group based on its ID.
|
||||
|
||||
Args:
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the delete operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('group', True, group_id, '', '')
|
||||
return self._DeleteProperties(uri)
|
||||
|
||||
def AddMemberToGroup(self, member_id, group_id):
|
||||
"""Add a member to a group.
|
||||
|
||||
Args:
|
||||
member_id: The member's email address (e.g. member@example.com).
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the add operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('member', False, group_id, member_id, '')
|
||||
properties = {}
|
||||
properties['memberId'] = member_id
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def IsMember(self, member_id, group_id):
|
||||
"""Check whether the given member already exists in the given group.
|
||||
|
||||
Args:
|
||||
member_id: The member's email address (e.g. member@example.com).
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
True if the member exists in the group. False otherwise.
|
||||
"""
|
||||
uri = self._ServiceUrl('member', True, group_id, member_id, '')
|
||||
return self._IsExisted(uri)
|
||||
|
||||
def RetrieveMember(self, member_id, group_id):
|
||||
"""Retrieve the given member in the given group.
|
||||
|
||||
Args:
|
||||
member_id: The member's email address (e.g. member@example.com).
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('member', True, group_id, member_id, '')
|
||||
return self._GetProperties(uri)
|
||||
|
||||
def RetrieveAllMembers(self, group_id, suspended_users=False):
|
||||
"""Retrieve all members in the given group.
|
||||
|
||||
Args:
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
suspended_users: A boolean; should we include any suspended users in
|
||||
the membership list returned?
|
||||
|
||||
Returns:
|
||||
A list containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('member', True, group_id, '', '',
|
||||
suspended_users=suspended_users)
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def RetrievePageOfMembers(self, group_id, suspended_users=False, start=None):
|
||||
"""Retrieve one page of members of a given group.
|
||||
|
||||
Args:
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
suspended_users: A boolean; should we include any suspended users in
|
||||
the membership list returned?
|
||||
start: The key to continue for pagination through all members.
|
||||
|
||||
Returns:
|
||||
A feed object containing the result of the retrieve operation.
|
||||
"""
|
||||
|
||||
uri = self._ServiceUrl('member', True, group_id, '', '',
|
||||
suspended_users=suspended_users)
|
||||
if start is not None:
|
||||
if suspended_users:
|
||||
uri += "&start="+start
|
||||
else:
|
||||
uri += "?start="+start
|
||||
property_feed = self._GetPropertyFeed(uri)
|
||||
return property_feed
|
||||
|
||||
def RemoveMemberFromGroup(self, member_id, group_id):
|
||||
"""Remove the given member from the given group.
|
||||
|
||||
Args:
|
||||
member_id: The member's email address (e.g. member@example.com).
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the remove operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('member', True, group_id, member_id, '')
|
||||
return self._DeleteProperties(uri)
|
||||
|
||||
def AddOwnerToGroup(self, owner_email, group_id):
|
||||
"""Add an owner to a group.
|
||||
|
||||
Args:
|
||||
owner_email: The email address of a group owner.
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the add operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('owner', False, group_id, '', owner_email)
|
||||
properties = {}
|
||||
properties['email'] = owner_email
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def IsOwner(self, owner_email, group_id):
|
||||
"""Check whether the given member an owner of the given group.
|
||||
|
||||
Args:
|
||||
owner_email: The email address of a group owner.
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
True if the member is an owner of the given group. False otherwise.
|
||||
"""
|
||||
uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
|
||||
return self._IsExisted(uri)
|
||||
|
||||
def RetrieveOwner(self, owner_email, group_id):
|
||||
"""Retrieve the given owner in the given group.
|
||||
|
||||
Args:
|
||||
owner_email: The email address of a group owner.
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
|
||||
return self._GetProperties(uri)
|
||||
|
||||
def RetrieveAllOwners(self, group_id, suspended_users=False):
|
||||
"""Retrieve all owners of the given group.
|
||||
|
||||
Args:
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
suspended_users: A boolean; should we include any suspended users in
|
||||
the ownership list returned?
|
||||
|
||||
Returns:
|
||||
A list containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('owner', True, group_id, '', '',
|
||||
suspended_users=suspended_users)
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def RetrievePageOfOwners(self, group_id, suspended_users=False, start=None):
|
||||
"""Retrieve one page of owners of the given group.
|
||||
|
||||
Args:
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
suspended_users: A boolean; should we include any suspended users in
|
||||
the ownership list returned?
|
||||
start: The key to continue for pagination through all owners.
|
||||
|
||||
Returns:
|
||||
A feed object containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('owner', True, group_id, '', '',
|
||||
suspended_users=suspended_users)
|
||||
if start is not None:
|
||||
if suspended_users:
|
||||
uri += "&start="+start
|
||||
else:
|
||||
uri += "?start="+start
|
||||
property_feed = self._GetPropertyFeed(uri)
|
||||
return property_feed
|
||||
|
||||
def RemoveOwnerFromGroup(self, owner_email, group_id):
|
||||
"""Remove the given owner from the given group.
|
||||
|
||||
Args:
|
||||
owner_email: The email address of a group owner.
|
||||
group_id: The ID of the group (e.g. us-sales).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the remove operation.
|
||||
"""
|
||||
uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
|
||||
return self._DeleteProperties(uri)
|
||||
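A brief usage sketch of the GroupsService methods above. The credentials, group ID and member addresses are placeholders, and the ProgrammaticLogin call is an assumption about the usual gdata.service auth flow:

# Sketch only: assumes domain-administrator credentials.
import gdata.apps.groups.service

groups = gdata.apps.groups.service.GroupsService(
    email='admin@example.com', password='secret', domain='example.com',
    source='groups-sample')
groups.ProgrammaticLogin()  # assumed auth flow

groups.CreateGroup('us-sales', 'US Sales', 'Sales team, US region',
                   gdata.apps.groups.service.PERMISSION_MEMBER)
groups.AddMemberToGroup('liz@example.com', 'us-sales')
groups.AddOwnerToGroup('admin@example.com', 'us-sales')
members = groups.RetrieveAllMembers('us-sales')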
212
python/gdata/apps/migration/__init__.py
Normal file
@@ -0,0 +1,212 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains objects used with Google Apps."""
|
||||
|
||||
__author__ = 'google-apps-apis@googlegroups.com'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
# XML namespaces which are often used in Google Apps entity.
|
||||
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
|
||||
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
|
||||
|
||||
|
||||
class Rfc822Msg(atom.AtomBase):
|
||||
"""The Migration rfc822Msg element."""
|
||||
|
||||
_tag = 'rfc822Msg'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['encoding'] = 'encoding'
|
||||
|
||||
def __init__(self, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.text = text
|
||||
self.encoding = 'base64'
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def Rfc822MsgFromString(xml_string):
|
||||
"""Parse in the Rrc822 message from the XML definition."""
|
||||
|
||||
return atom.CreateClassFromXMLString(Rfc822Msg, xml_string)
|
||||
|
||||
|
||||
class MailItemProperty(atom.AtomBase):
|
||||
"""The Migration mailItemProperty element."""
|
||||
|
||||
_tag = 'mailItemProperty'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, value=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.value = value
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def MailItemPropertyFromString(xml_string):
|
||||
"""Parse in the MailItemProperiy from the XML definition."""
|
||||
|
||||
return atom.CreateClassFromXMLString(MailItemProperty, xml_string)
|
||||
|
||||
|
||||
class Label(atom.AtomBase):
|
||||
"""The Migration label element."""
|
||||
|
||||
_tag = 'label'
|
||||
_namespace = APPS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['labelName'] = 'label_name'
|
||||
|
||||
def __init__(self, label_name=None,
|
||||
extension_elements=None, extension_attributes=None,
|
||||
text=None):
|
||||
self.label_name = label_name
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def LabelFromString(xml_string):
|
||||
"""Parse in the mailItemProperty from the XML definition."""
|
||||
|
||||
return atom.CreateClassFromXMLString(Label, xml_string)
|
||||
|
||||
|
||||
class MailEntry(gdata.GDataEntry):
|
||||
"""A Google Migration flavor of an Atom Entry."""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
|
||||
_children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
|
||||
[MailItemProperty])
|
||||
_children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None,
|
||||
rfc822_msg=None, mail_item_property=None, label=None,
|
||||
extended_property=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
|
||||
gdata.GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content,
|
||||
atom_id=atom_id, link=link, published=published,
|
||||
title=title, updated=updated)
|
||||
self.rfc822_msg = rfc822_msg
|
||||
self.mail_item_property = mail_item_property
|
||||
self.label = label
|
||||
self.extended_property = extended_property or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def MailEntryFromString(xml_string):
|
||||
"""Parse in the MailEntry from the XML definition."""
|
||||
|
||||
return atom.CreateClassFromXMLString(MailEntry, xml_string)
|
||||
|
||||
|
||||
class BatchMailEntry(gdata.BatchEntry):
|
||||
"""A Google Migration flavor of an Atom Entry."""
|
||||
|
||||
_tag = gdata.BatchEntry._tag
|
||||
_namespace = gdata.BatchEntry._namespace
|
||||
_children = gdata.BatchEntry._children.copy()
|
||||
_attributes = gdata.BatchEntry._attributes.copy()
|
||||
_children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
|
||||
_children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
|
||||
[MailItemProperty])
|
||||
_children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None,
|
||||
rfc822_msg=None, mail_item_property=None, label=None,
|
||||
batch_operation=None, batch_id=None, batch_status=None,
|
||||
extended_property=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
|
||||
gdata.BatchEntry.__init__(self, author=author, category=category,
|
||||
content=content,
|
||||
atom_id=atom_id, link=link, published=published,
|
||||
batch_operation=batch_operation,
|
||||
batch_id=batch_id, batch_status=batch_status,
|
||||
title=title, updated=updated)
|
||||
self.rfc822_msg = rfc822_msg or None
|
||||
self.mail_item_property = mail_item_property or []
|
||||
self.label = label or []
|
||||
self.extended_property = extended_property or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def BatchMailEntryFromString(xml_string):
|
||||
"""Parse in the BatchMailEntry from the XML definition."""
|
||||
|
||||
return atom.CreateClassFromXMLString(BatchMailEntry, xml_string)
|
||||
|
||||
|
||||
class BatchMailEventFeed(gdata.BatchFeed):
|
||||
"""A Migration event feed flavor of an Atom Feed."""
|
||||
|
||||
_tag = gdata.BatchFeed._tag
|
||||
_namespace = gdata.BatchFeed._namespace
|
||||
_children = gdata.BatchFeed._children.copy()
|
||||
_attributes = gdata.BatchFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchMailEntry])
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None,
|
||||
entry=None, total_results=None, start_index=None,
|
||||
items_per_page=None, interrupted=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
gdata.BatchFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results,
|
||||
start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
interrupted=interrupted,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def BatchMailEventFeedFromString(xml_string):
|
||||
"""Parse in the BatchMailEventFeed from the XML definition."""
|
||||
|
||||
return atom.CreateClassFromXMLString(BatchMailEventFeed, xml_string)
|
||||
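The element classes above are the building blocks the migration service uses to wrap an RFC 822 message. A small construction sketch; the message text, mail item property value and label name are placeholders:

# Sketch only: builds a MailEntry by hand from the classes defined above.
import base64
from gdata.apps import migration

raw_message = 'From: liz@example.com\r\nSubject: Hello\r\n\r\nHello world.'
entry = migration.MailEntry()
entry.rfc822_msg = migration.Rfc822Msg(text=base64.b64encode(raw_message))
entry.rfc822_msg.encoding = 'base64'
entry.mail_item_property = [migration.MailItemProperty(value='IS_INBOX')]
entry.label = [migration.Label(label_name='Migrated')]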
129
python/gdata/apps/migration/service.py
Normal file
@@ -0,0 +1,129 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains the methods to import mail via Google Apps Email Migration API.
|
||||
|
||||
MigrationService: Provides methods to import mail.
|
||||
"""
|
||||
|
||||
__author__ = 'google-apps-apis@googlegroups.com'
|
||||
|
||||
|
||||
import base64
|
||||
import gdata
|
||||
import gdata.apps.service
|
||||
import gdata.service
|
||||
from gdata.apps import migration
|
||||
|
||||
|
||||
API_VER = '2.0'
|
||||
|
||||
|
||||
class MigrationService(gdata.apps.service.AppsService):
|
||||
"""Client for the EMAPI migration service. Use either ImportMail to import
|
||||
one message at a time, or AddBatchEntry and SubmitBatch to import a batch of
|
||||
messages at a time.
|
||||
"""
|
||||
def __init__(self, email=None, password=None, domain=None, source=None,
|
||||
server='apps-apis.google.com', additional_headers=None):
|
||||
gdata.apps.service.AppsService.__init__(
|
||||
self, email=email, password=password, domain=domain, source=source,
|
||||
server=server, additional_headers=additional_headers)
|
||||
self.mail_batch = migration.BatchMailEventFeed()
|
||||
|
||||
def _BaseURL(self):
|
||||
return '/a/feeds/migration/%s/%s' % (API_VER, self.domain)
|
||||
|
||||
def ImportMail(self, user_name, mail_message, mail_item_properties,
|
||||
mail_labels):
|
||||
"""Import a single mail message.
|
||||
|
||||
Args:
|
||||
user_name: The username to import messages to.
|
||||
mail_message: An RFC822 format email message.
|
||||
mail_item_properties: A list of Gmail properties to apply to the message.
|
||||
mail_labels: A list of labels to apply to the message.
|
||||
|
||||
Returns:
|
||||
A MailEntry representing the successfully imported message.
|
||||
|
||||
Raises:
|
||||
AppsForYourDomainException: An error occurred importing the message.
|
||||
"""
|
||||
uri = '%s/%s/mail' % (self._BaseURL(), user_name)
|
||||
|
||||
mail_entry = migration.MailEntry()
|
||||
mail_entry.rfc822_msg = migration.Rfc822Msg(text=(base64.b64encode(
|
||||
mail_message)))
|
||||
mail_entry.rfc822_msg.encoding = 'base64'
|
||||
mail_entry.mail_item_property = map(
|
||||
lambda x: migration.MailItemProperty(value=x), mail_item_properties)
|
||||
mail_entry.label = map(lambda x: migration.Label(label_name=x),
|
||||
mail_labels)
|
||||
|
||||
try:
|
||||
return migration.MailEntryFromString(str(self.Post(mail_entry, uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def AddBatchEntry(self, mail_message, mail_item_properties,
|
||||
mail_labels):
|
||||
"""Add a message to the current batch that you later will submit.
|
||||
|
||||
Args:
|
||||
mail_message: An RFC822 format email message.
|
||||
mail_item_properties: A list of Gmail properties to apply to the message.
|
||||
mail_labels: A list of labels to apply to the message.
|
||||
|
||||
Returns:
|
||||
The length of the MailEntry representing the message.
|
||||
"""
|
||||
mail_entry = migration.BatchMailEntry()
|
||||
mail_entry.rfc822_msg = migration.Rfc822Msg(text=(base64.b64encode(
|
||||
mail_message)))
|
||||
mail_entry.rfc822_msg.encoding = 'base64'
|
||||
mail_entry.mail_item_property = map(
|
||||
lambda x: migration.MailItemProperty(value=x), mail_item_properties)
|
||||
mail_entry.label = map(lambda x: migration.Label(label_name=x),
|
||||
mail_labels)
|
||||
|
||||
self.mail_batch.AddBatchEntry(mail_entry)
|
||||
|
||||
return len(str(mail_entry))
|
||||
|
||||
def SubmitBatch(self, user_name):
|
||||
"""Send a all the mail items you have added to the batch to the server.
|
||||
|
||||
Args:
|
||||
user_name: The username to import messages to.
|
||||
|
||||
Returns:
|
||||
A HTTPResponse from the web service call.
|
||||
|
||||
Raises:
|
||||
AppsForYourDomainException: An error occurred importing the batch.
|
||||
"""
|
||||
uri = '%s/%s/mail/batch' % (self._BaseURL(), user_name)
|
||||
|
||||
try:
|
||||
self.result = self.Post(self.mail_batch, uri,
|
||||
converter=migration.BatchMailEventFeedFromString)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
self.mail_batch = migration.BatchMailEventFeed()
|
||||
|
||||
return self.result
|
||||
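A usage sketch of the MigrationService above, showing both the single-message ImportMail path and the AddBatchEntry/SubmitBatch batch path described in the class docstring. The credentials, message text, properties and labels are placeholders, and the ProgrammaticLogin call is an assumption about the usual gdata.service auth flow:

# Sketch only: assumes domain-administrator credentials.
import gdata.apps.migration.service

migration_service = gdata.apps.migration.service.MigrationService(
    email='admin@example.com', password='secret', domain='example.com',
    source='migration-sample')
migration_service.ProgrammaticLogin()  # assumed auth flow

raw_message = 'From: liz@example.com\r\nSubject: Hello\r\n\r\nHello world.'

# Import one message at a time...
migration_service.ImportMail('liz', raw_message, ['IS_INBOX'], ['Migrated'])

# ...or queue several messages and submit them as one batch.
migration_service.AddBatchEntry(raw_message, ['IS_UNREAD'], ['Migrated'])
migration_service.SubmitBatch('liz')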
0
python/gdata/apps/organization/__init__.py
Normal file
297
python/gdata/apps/organization/service.py
Normal file
@@ -0,0 +1,297 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2008 Google, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Allow Google Apps domain administrators to manage organization unit and organization user.
|
||||
|
||||
OrganizationService: Provides methods to manage organization units and organization users.
|
||||
"""
|
||||
|
||||
__author__ = 'Alexandre Vivien (alex@simplecode.fr)'
|
||||
|
||||
|
||||
import gdata.apps
|
||||
import gdata.apps.service
|
||||
import gdata.service
|
||||
|
||||
|
||||
API_VER = '2.0'
|
||||
CUSTOMER_BASE_URL = '/a/feeds/customer/2.0/customerId'
|
||||
BASE_UNIT_URL = '/a/feeds/orgunit/' + API_VER + '/%s'
|
||||
UNIT_URL = BASE_UNIT_URL + '/%s'
|
||||
UNIT_ALL_URL = BASE_UNIT_URL + '?get=all'
|
||||
UNIT_CHILD_URL = BASE_UNIT_URL + '?get=children&orgUnitPath=%s'
|
||||
BASE_USER_URL = '/a/feeds/orguser/' + API_VER + '/%s'
|
||||
USER_URL = BASE_USER_URL + '/%s'
|
||||
USER_ALL_URL = BASE_USER_URL + '?get=all'
|
||||
USER_CHILD_URL = BASE_USER_URL + '?get=children&orgUnitPath=%s'
|
||||
|
||||
|
||||
class OrganizationService(gdata.apps.service.PropertyService):
|
||||
"""Client for the Google Apps Organizations service."""
|
||||
|
||||
def _Bool2Str(self, b):
|
||||
if b is None:
|
||||
return None
|
||||
return str(b is True).lower()
|
||||
|
||||
def RetrieveCustomerId(self):
|
||||
"""Retrieve the Customer ID for the account of the authenticated administrator making this request.
|
||||
|
||||
Args:
|
||||
None.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the retrieve operation.
|
||||
"""
|
||||
|
||||
uri = CUSTOMER_BASE_URL
|
||||
return self._GetProperties(uri)
|
||||
|
||||
def CreateOrgUnit(self, customer_id, name, parent_org_unit_path='/', description='', block_inheritance=False):
|
||||
"""Create a Organization Unit.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
name: The simple organization unit text name, not the full path name.
|
||||
parent_org_unit_path: The full path of the parental tree to this organization unit (default: '/').
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
description: The human readable text description of the organization unit (optional).
|
||||
block_inheritance: This parameter blocks policy setting inheritance
|
||||
from organization units higher in the organization tree (default: False).
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the create operation.
|
||||
"""
|
||||
|
||||
uri = BASE_UNIT_URL % (customer_id)
|
||||
properties = {}
|
||||
properties['name'] = name
|
||||
properties['parentOrgUnitPath'] = parent_org_unit_path
|
||||
properties['description'] = description
|
||||
properties['blockInheritance'] = self._Bool2Str(block_inheritance)
|
||||
return self._PostProperties(uri, properties)
|
||||
|
||||
def UpdateOrgUnit(self, customer_id, org_unit_path, name=None, parent_org_unit_path=None,
|
||||
description=None, block_inheritance=None):
|
||||
"""Update a Organization Unit.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
org_unit_path: The organization's full path name.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
name: The simple organization unit text name, not the full path name.
|
||||
parent_org_unit_path: The full path of the parental tree to this organization unit.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
description: The human readable text description of the organization unit.
|
||||
block_inheritance: This parameter blocks policy setting inheritance
|
||||
from organization units higher in the organization tree.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
|
||||
uri = UNIT_URL % (customer_id, org_unit_path)
|
||||
properties = {}
|
||||
if name:
|
||||
properties['name'] = name
|
||||
if parent_org_unit_path:
|
||||
properties['parentOrgUnitPath'] = parent_org_unit_path
|
||||
if description:
|
||||
properties['description'] = description
|
||||
if block_inheritance:
|
||||
properties['blockInheritance'] = self._Bool2Str(block_inheritance)
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def MoveUserToOrgUnit(self, customer_id, org_unit_path, users_to_move):
|
||||
"""Move a user to an Organization Unit.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
org_unit_path: The organization's full path name.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
users_to_move: List of email addresses of the users to move. Note: You can move a maximum of 25 users at one time.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
|
||||
uri = UNIT_URL % (customer_id, org_unit_path)
|
||||
properties = {}
|
||||
if users_to_move and isinstance(users_to_move, list):
|
||||
properties['usersToMove'] = ', '.join(users_to_move)
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def RetrieveOrgUnit(self, customer_id, org_unit_path):
|
||||
"""Retrieve a Orgunit based on its path.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
org_unit_path: The organization's full path name.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = UNIT_URL % (customer_id, org_unit_path)
|
||||
return self._GetProperties(uri)
|
||||
|
||||
def DeleteOrgUnit(self, customer_id, org_unit_path):
|
||||
"""Delete a Orgunit based on its path.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
org_unit_path: The organization's full path name.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the delete operation.
|
||||
"""
|
||||
uri = UNIT_URL % (customer_id, org_unit_path)
|
||||
return self._DeleteProperties(uri)
|
||||
|
||||
def RetrieveAllOrgUnits(self, customer_id):
|
||||
"""Retrieve all OrgUnits in the customer's domain.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
|
||||
Returns:
|
||||
A list containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = UNIT_ALL_URL % (customer_id)
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def RetrievePageOfOrgUnits(self, customer_id, startKey=None):
|
||||
"""Retrieve one page of OrgUnits in the customer's domain.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
startKey: The key to continue for pagination through all OrgUnits.
|
||||
|
||||
Returns:
|
||||
A feed object containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = UNIT_ALL_URL % (customer_id)
|
||||
if startKey is not None:
|
||||
uri += "&startKey=" + startKey
|
||||
property_feed = self._GetPropertyFeed(uri)
|
||||
return property_feed
|
||||
|
||||
def RetrieveSubOrgUnits(self, customer_id, org_unit_path):
|
||||
"""Retrieve all Sub-OrgUnits of the provided OrgUnit.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
org_unit_path: The organization's full path name.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
|
||||
Returns:
|
||||
A list containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = UNIT_CHILD_URL % (customer_id, org_unit_path)
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def RetrieveOrgUser(self, customer_id, user_email):
|
||||
"""Retrieve the OrgUnit of the user.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
user_email: The email address of the user.
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = USER_URL % (customer_id, user_email)
|
||||
return self._GetProperties(uri)
|
||||
|
||||
def UpdateOrgUser(self, customer_id, user_email, org_unit_path):
|
||||
"""Update the OrgUnit of a OrgUser.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
user_email: The email address of the user.
|
||||
org_unit_path: The new organization's full path name.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
|
||||
Returns:
|
||||
A dict containing the result of the update operation.
|
||||
"""
|
||||
|
||||
uri = USER_URL % (customer_id, user_email)
|
||||
properties = {}
|
||||
if org_unit_path:
|
||||
properties['orgUnitPath'] = org_unit_path
|
||||
return self._PutProperties(uri, properties)
|
||||
|
||||
def RetrieveAllOrgUsers(self, customer_id):
|
||||
"""Retrieve all OrgUsers in the customer's domain.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
|
||||
Returns:
|
||||
A list containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = USER_ALL_URL % (customer_id)
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def RetrievePageOfOrgUsers(self, customer_id, startKey=None):
|
||||
"""Retrieve one page of OrgUsers in the customer's domain.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
startKey: The key to continue for pagination through all OrgUsers.
|
||||
|
||||
Returns:
|
||||
A feed object containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = USER_ALL_URL % (customer_id)
|
||||
if startKey is not None:
|
||||
uri += "&startKey=" + startKey
|
||||
property_feed = self._GetPropertyFeed(uri)
|
||||
return property_feed
|
||||
|
||||
def RetrieveOrgUnitUsers(self, customer_id, org_unit_path):
|
||||
"""Retrieve all OrgUsers of the provided OrgUnit.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
org_unit_path: The organization's full path name.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
|
||||
Returns:
|
||||
A list containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = USER_CHILD_URL % (customer_id, org_unit_path)
|
||||
return self._GetPropertiesList(uri)
|
||||
|
||||
def RetrieveOrgUnitPageOfUsers(self, customer_id, org_unit_path, startKey=None):
|
||||
"""Retrieve one page of OrgUsers of the provided OrgUnit.
|
||||
|
||||
Args:
|
||||
customer_id: The ID of the Google Apps customer.
|
||||
org_unit_path: The organization's full path name.
|
||||
Note: Each element of the path MUST be URL encoded (example: finance%2Forganization/suborganization)
|
||||
startKey: The key to continue for pagination through all OrgUsers.
|
||||
|
||||
Returns:
|
||||
A feed object containing the result of the retrieve operation.
|
||||
"""
|
||||
uri = USER_CHILD_URL % (customer_id, org_unit_path)
|
||||
if startKey is not None:
|
||||
uri += "&startKey=" + startKey
|
||||
property_feed = self._GetPropertyFeed(uri)
|
||||
return property_feed
|
||||
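A usage sketch of the OrganizationService above. The credentials and names are placeholders, the ProgrammaticLogin call is an assumption about the usual gdata.service auth flow, and reading the customer id out of the returned dict via the 'customerId' key is an assumption about the property name returned by the customer feed:

# Sketch only: assumes domain-administrator credentials.
import gdata.apps.organization.service

org = gdata.apps.organization.service.OrganizationService(
    email='admin@example.com', password='secret', domain='example.com',
    source='organization-sample')
org.ProgrammaticLogin()  # assumed auth flow

customer = org.RetrieveCustomerId()
customer_id = customer['customerId']  # assumed property name
org.CreateOrgUnit(customer_id, 'sales', parent_org_unit_path='/',
                  description='Sales department')
org.MoveUserToOrgUnit(customer_id, 'sales', ['liz@example.com'])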
552
python/gdata/apps/service.py
Normal file
@@ -0,0 +1,552 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2007 SIOS Technology, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
|
||||
|
||||
try:
|
||||
from xml.etree import cElementTree as ElementTree
|
||||
except ImportError:
|
||||
try:
|
||||
import cElementTree as ElementTree
|
||||
except ImportError:
|
||||
try:
|
||||
from xml.etree import ElementTree
|
||||
except ImportError:
|
||||
from elementtree import ElementTree
|
||||
import urllib
|
||||
import gdata
|
||||
import atom.service
|
||||
import gdata.service
|
||||
import gdata.apps
|
||||
import atom
|
||||
|
||||
API_VER="2.0"
|
||||
HTTP_OK=200
|
||||
|
||||
UNKOWN_ERROR=1000
|
||||
USER_DELETED_RECENTLY=1100
|
||||
USER_SUSPENDED=1101
|
||||
DOMAIN_USER_LIMIT_EXCEEDED=1200
|
||||
DOMAIN_ALIAS_LIMIT_EXCEEDED=1201
|
||||
DOMAIN_SUSPENDED=1202
|
||||
DOMAIN_FEATURE_UNAVAILABLE=1203
|
||||
ENTITY_EXISTS=1300
|
||||
ENTITY_DOES_NOT_EXIST=1301
|
||||
ENTITY_NAME_IS_RESERVED=1302
|
||||
ENTITY_NAME_NOT_VALID=1303
|
||||
INVALID_GIVEN_NAME=1400
|
||||
INVALID_FAMILY_NAME=1401
|
||||
INVALID_PASSWORD=1402
|
||||
INVALID_USERNAME=1403
|
||||
INVALID_HASH_FUNCTION_NAME=1404
|
||||
INVALID_HASH_DIGGEST_LENGTH=1405
|
||||
INVALID_EMAIL_ADDRESS=1406
|
||||
INVALID_QUERY_PARAMETER_VALUE=1407
|
||||
TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500
|
||||
|
||||
DEFAULT_QUOTA_LIMIT='2048'
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AppsForYourDomainException(Error):
|
||||
|
||||
def __init__(self, response):
|
||||
|
||||
Error.__init__(self, response)
|
||||
try:
|
||||
self.element_tree = ElementTree.fromstring(response['body'])
|
||||
self.error_code = int(self.element_tree[0].attrib['errorCode'])
|
||||
self.reason = self.element_tree[0].attrib['reason']
|
||||
self.invalidInput = self.element_tree[0].attrib['invalidInput']
|
||||
except:
|
||||
self.error_code = UNKOWN_ERROR
|
||||
|
||||
|
||||
class AppsService(gdata.service.GDataService):
|
||||
"""Client for the Google Apps Provisioning service."""
|
||||
|
||||
def __init__(self, email=None, password=None, domain=None, source=None,
|
||||
server='apps-apis.google.com', additional_headers=None,
|
||||
**kwargs):
|
||||
"""Creates a client for the Google Apps Provisioning service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
domain: string (optional) The Google Apps domain name.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'apps-apis.google.com'.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service='apps', source=source,
|
||||
server=server, additional_headers=additional_headers, **kwargs)
|
||||
self.ssl = True
|
||||
self.port = 443
|
||||
self.domain = domain
|
||||
|
||||
def _baseURL(self):
|
||||
return "/a/feeds/%s" % self.domain
|
||||
|
||||
def AddAllElementsFromAllPages(self, link_finder, func):
|
||||
"""retrieve all pages and add all elements"""
|
||||
next = link_finder.GetNextLink()
|
||||
while next is not None:
|
||||
next_feed = self.Get(next.href, converter=func)
|
||||
for a_entry in next_feed.entry:
|
||||
link_finder.entry.append(a_entry)
|
||||
next = next_feed.GetNextLink()
|
||||
return link_finder
|
||||
|
||||
def RetrievePageOfEmailLists(self, start_email_list_name=None,
|
||||
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY,
|
||||
backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve one page of email list"""
|
||||
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
|
||||
if start_email_list_name is not None:
|
||||
uri += "?startEmailListName=%s" % start_email_list_name
|
||||
try:
|
||||
return gdata.apps.EmailListFeedFromString(str(self.GetWithRetries(
|
||||
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def GetGeneratorForAllEmailLists(
|
||||
self, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve a generator for all emaillists in this domain."""
|
||||
first_page = self.RetrievePageOfEmailLists(num_retries=num_retries,
|
||||
delay=delay,
|
||||
backoff=backoff)
|
||||
return self.GetGeneratorFromLinkFinder(
|
||||
first_page, gdata.apps.EmailListRecipientFeedFromString,
|
||||
num_retries=num_retries, delay=delay, backoff=backoff)
|
||||
|
||||
def RetrieveAllEmailLists(self):
|
||||
"""Retrieve all email list of a domain."""
|
||||
|
||||
ret = self.RetrievePageOfEmailLists()
|
||||
# pagination
|
||||
return self.AddAllElementsFromAllPages(
|
||||
ret, gdata.apps.EmailListFeedFromString)
|
||||
|
||||
def RetrieveEmailList(self, list_name):
|
||||
"""Retreive a single email list by the list's name."""
|
||||
|
||||
uri = "%s/emailList/%s/%s" % (
|
||||
self._baseURL(), API_VER, list_name)
|
||||
try:
|
||||
return self.Get(uri, converter=gdata.apps.EmailListEntryFromString)
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def RetrieveEmailLists(self, recipient):
|
||||
"""Retrieve All Email List Subscriptions for an Email Address."""
|
||||
|
||||
uri = "%s/emailList/%s?recipient=%s" % (
|
||||
self._baseURL(), API_VER, recipient)
|
||||
try:
|
||||
ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
# pagination
|
||||
return self.AddAllElementsFromAllPages(
|
||||
ret, gdata.apps.EmailListFeedFromString)
|
||||
|
||||
def RemoveRecipientFromEmailList(self, recipient, list_name):
|
||||
"""Remove recipient from email list."""
|
||||
|
||||
uri = "%s/emailList/%s/%s/recipient/%s" % (
|
||||
self._baseURL(), API_VER, list_name, recipient)
|
||||
try:
|
||||
self.Delete(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def RetrievePageOfRecipients(self, list_name, start_recipient=None,
|
||||
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY,
|
||||
backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve one page of recipient of an email list. """
|
||||
|
||||
uri = "%s/emailList/%s/%s/recipient" % (
|
||||
self._baseURL(), API_VER, list_name)
|
||||
|
||||
if start_recipient is not None:
|
||||
uri += "?startRecipient=%s" % start_recipient
|
||||
try:
|
||||
return gdata.apps.EmailListRecipientFeedFromString(str(
|
||||
self.GetWithRetries(
|
||||
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def GetGeneratorForAllRecipients(
|
||||
self, list_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve a generator for all recipients of a particular emaillist."""
|
||||
first_page = self.RetrievePageOfRecipients(list_name,
|
||||
num_retries=num_retries,
|
||||
delay=delay,
|
||||
backoff=backoff)
|
||||
return self.GetGeneratorFromLinkFinder(
|
||||
first_page, gdata.apps.EmailListRecipientFeedFromString,
|
||||
num_retries=num_retries, delay=delay, backoff=backoff)
|
||||
|
||||
def RetrieveAllRecipients(self, list_name):
|
||||
"""Retrieve all recipient of an email list."""
|
||||
|
||||
ret = self.RetrievePageOfRecipients(list_name)
|
||||
# pagination
|
||||
return self.AddAllElementsFromAllPages(
|
||||
ret, gdata.apps.EmailListRecipientFeedFromString)
|
||||
|
||||
def AddRecipientToEmailList(self, recipient, list_name):
|
||||
"""Add a recipient to a email list."""
|
||||
|
||||
uri = "%s/emailList/%s/%s/recipient" % (
|
||||
self._baseURL(), API_VER, list_name)
|
||||
recipient_entry = gdata.apps.EmailListRecipientEntry()
|
||||
recipient_entry.who = gdata.apps.Who(email=recipient)
|
||||
|
||||
try:
|
||||
return gdata.apps.EmailListRecipientEntryFromString(
|
||||
str(self.Post(recipient_entry, uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def DeleteEmailList(self, list_name):
|
||||
"""Delete a email list"""
|
||||
|
||||
uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name)
|
||||
try:
|
||||
self.Delete(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def CreateEmailList(self, list_name):
|
||||
"""Create a email list. """
|
||||
|
||||
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
|
||||
email_list_entry = gdata.apps.EmailListEntry()
|
||||
email_list_entry.email_list = gdata.apps.EmailList(name=list_name)
|
||||
try:
|
||||
return gdata.apps.EmailListEntryFromString(
|
||||
str(self.Post(email_list_entry, uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def DeleteNickname(self, nickname):
|
||||
"""Delete a nickname"""
|
||||
|
||||
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
|
||||
try:
|
||||
self.Delete(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def RetrievePageOfNicknames(self, start_nickname=None,
|
||||
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY,
|
||||
backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve one page of nicknames in the domain"""
|
||||
|
||||
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
|
||||
if start_nickname is not None:
|
||||
uri += "?startNickname=%s" % start_nickname
|
||||
try:
|
||||
return gdata.apps.NicknameFeedFromString(str(self.GetWithRetries(
|
||||
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def GetGeneratorForAllNicknames(
|
||||
self, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve a generator for all nicknames in this domain."""
|
||||
first_page = self.RetrievePageOfNicknames(num_retries=num_retries,
|
||||
delay=delay,
|
||||
backoff=backoff)
|
||||
return self.GetGeneratorFromLinkFinder(
|
||||
first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries,
|
||||
delay=delay, backoff=backoff)
|
||||
|
||||
def RetrieveAllNicknames(self):
|
||||
"""Retrieve all nicknames in the domain"""
|
||||
|
||||
ret = self.RetrievePageOfNicknames()
|
||||
# pagination
|
||||
return self.AddAllElementsFromAllPages(
|
||||
ret, gdata.apps.NicknameFeedFromString)
|
||||
|
||||
def GetGeneratorForAllNicknamesOfAUser(
|
||||
self, user_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve a generator for all nicknames of a particular user."""
|
||||
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
|
||||
try:
|
||||
first_page = gdata.apps.NicknameFeedFromString(str(self.GetWithRetries(
|
||||
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
return self.GetGeneratorFromLinkFinder(
|
||||
first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries,
|
||||
delay=delay, backoff=backoff)
|
||||
|
||||
def RetrieveNicknames(self, user_name):
|
||||
"""Retrieve nicknames of the user"""
|
||||
|
||||
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
|
||||
try:
|
||||
ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
# pagination
|
||||
return self.AddAllElementsFromAllPages(
|
||||
ret, gdata.apps.NicknameFeedFromString)
|
||||
|
||||
def RetrieveNickname(self, nickname):
|
||||
"""Retrieve a nickname.
|
||||
|
||||
Args:
|
||||
nickname: string The nickname to retrieve
|
||||
|
||||
Returns:
|
||||
gdata.apps.NicknameEntry
|
||||
"""
|
||||
|
||||
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
|
||||
try:
|
||||
return gdata.apps.NicknameEntryFromString(str(self.Get(uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def CreateNickname(self, user_name, nickname):
|
||||
"""Create a nickname"""
|
||||
|
||||
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
|
||||
nickname_entry = gdata.apps.NicknameEntry()
|
||||
nickname_entry.login = gdata.apps.Login(user_name=user_name)
|
||||
nickname_entry.nickname = gdata.apps.Nickname(name=nickname)
|
||||
|
||||
try:
|
||||
return gdata.apps.NicknameEntryFromString(
|
||||
str(self.Post(nickname_entry, uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def DeleteUser(self, user_name):
|
||||
"""Delete a user account"""
|
||||
|
||||
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
|
||||
try:
|
||||
return self.Delete(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def UpdateUser(self, user_name, user_entry):
|
||||
"""Update a user account."""
|
||||
|
||||
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
|
||||
try:
|
||||
return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def CreateUser(self, user_name, family_name, given_name, password,
|
||||
suspended='false', quota_limit=None,
|
||||
password_hash_function=None,
|
||||
change_password=None):
|
||||
"""Create a user account. """
|
||||
|
||||
uri = "%s/user/%s" % (self._baseURL(), API_VER)
|
||||
user_entry = gdata.apps.UserEntry()
|
||||
user_entry.login = gdata.apps.Login(
|
||||
user_name=user_name, password=password, suspended=suspended,
|
||||
hash_function_name=password_hash_function,
|
||||
change_password=change_password)
|
||||
user_entry.name = gdata.apps.Name(family_name=family_name,
|
||||
given_name=given_name)
|
||||
if quota_limit is not None:
|
||||
user_entry.quota = gdata.apps.Quota(limit=str(quota_limit))
|
||||
|
||||
try:
|
||||
return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def SuspendUser(self, user_name):
|
||||
user_entry = self.RetrieveUser(user_name)
|
||||
if user_entry.login.suspended != 'true':
|
||||
user_entry.login.suspended = 'true'
|
||||
user_entry = self.UpdateUser(user_name, user_entry)
|
||||
return user_entry
|
||||
|
||||
def RestoreUser(self, user_name):
|
||||
user_entry = self.RetrieveUser(user_name)
|
||||
if user_entry.login.suspended != 'false':
|
||||
user_entry.login.suspended = 'false'
|
||||
user_entry = self.UpdateUser(user_name, user_entry)
|
||||
return user_entry
|
||||
|
||||
def RetrieveUser(self, user_name):
|
||||
"""Retrieve an user account.
|
||||
|
||||
Args:
|
||||
user_name: string The user name to retrieve
|
||||
|
||||
Returns:
|
||||
gdata.apps.UserEntry
|
||||
"""
|
||||
|
||||
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
|
||||
try:
|
||||
return gdata.apps.UserEntryFromString(str(self.Get(uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def RetrievePageOfUsers(self, start_username=None,
|
||||
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY,
|
||||
backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve one page of users in this domain."""
|
||||
|
||||
uri = "%s/user/%s" % (self._baseURL(), API_VER)
|
||||
if start_username is not None:
|
||||
uri += "?startUsername=%s" % start_username
|
||||
try:
|
||||
return gdata.apps.UserFeedFromString(str(self.GetWithRetries(
|
||||
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise AppsForYourDomainException(e.args[0])
|
||||
|
||||
def GetGeneratorForAllUsers(self,
|
||||
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
|
||||
delay=gdata.service.DEFAULT_DELAY,
|
||||
backoff=gdata.service.DEFAULT_BACKOFF):
|
||||
"""Retrieve a generator for all users in this domain."""
|
||||
first_page = self.RetrievePageOfUsers(num_retries=num_retries, delay=delay,
|
||||
backoff=backoff)
|
||||
return self.GetGeneratorFromLinkFinder(
|
||||
first_page, gdata.apps.UserFeedFromString, num_retries=num_retries,
|
||||
delay=delay, backoff=backoff)
|
||||
|
||||
def RetrieveAllUsers(self):
|
||||
"""Retrieve all users in this domain. OBSOLETE"""
|
||||
|
||||
ret = self.RetrievePageOfUsers()
|
||||
# pagination
|
||||
return self.AddAllElementsFromAllPages(
|
||||
ret, gdata.apps.UserFeedFromString)
|
||||
|
||||
|
||||
class PropertyService(gdata.service.GDataService):
|
||||
"""Client for the Google Apps Property service."""
|
||||
|
||||
def __init__(self, email=None, password=None, domain=None, source=None,
|
||||
server='apps-apis.google.com', additional_headers=None):
|
||||
gdata.service.GDataService.__init__(self, email=email, password=password,
|
||||
service='apps', source=source,
|
||||
server=server,
|
||||
additional_headers=additional_headers)
|
||||
self.ssl = True
|
||||
self.port = 443
|
||||
self.domain = domain
|
||||
|
||||
def AddAllElementsFromAllPages(self, link_finder, func):
|
||||
"""retrieve all pages and add all elements"""
|
||||
next = link_finder.GetNextLink()
|
||||
while next is not None:
|
||||
next_feed = self.Get(next.href, converter=func)
|
||||
for a_entry in next_feed.entry:
|
||||
link_finder.entry.append(a_entry)
|
||||
next = next_feed.GetNextLink()
|
||||
return link_finder
|
||||
|
||||
def _GetPropertyEntry(self, properties):
|
||||
property_entry = gdata.apps.PropertyEntry()
|
||||
property = []
|
||||
for name, value in properties.iteritems():
|
||||
if name is not None and value is not None:
|
||||
property.append(gdata.apps.Property(name=name, value=value))
|
||||
property_entry.property = property
|
||||
return property_entry
|
||||
|
||||
def _PropertyEntry2Dict(self, property_entry):
|
||||
properties = {}
|
||||
for property in property_entry.property:
|
||||
properties[property.name] = property.value
|
||||
return properties
|
||||
|
||||
def _GetPropertyFeed(self, uri):
|
||||
try:
|
||||
return gdata.apps.PropertyFeedFromString(str(self.Get(uri)))
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def _GetPropertiesList(self, uri):
|
||||
property_feed = self._GetPropertyFeed(uri)
|
||||
# pagination
|
||||
property_feed = self.AddAllElementsFromAllPages(
|
||||
property_feed, gdata.apps.PropertyFeedFromString)
|
||||
properties_list = []
|
||||
for property_entry in property_feed.entry:
|
||||
properties_list.append(self._PropertyEntry2Dict(property_entry))
|
||||
return properties_list
|
||||
|
||||
def _GetProperties(self, uri):
|
||||
try:
|
||||
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
|
||||
str(self.Get(uri))))
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def _PostProperties(self, uri, properties):
|
||||
property_entry = self._GetPropertyEntry(properties)
|
||||
try:
|
||||
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
|
||||
str(self.Post(property_entry, uri))))
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def _PutProperties(self, uri, properties):
|
||||
property_entry = self._GetPropertyEntry(properties)
|
||||
try:
|
||||
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
|
||||
str(self.Put(property_entry, uri))))
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
def _DeleteProperties(self, uri):
|
||||
try:
|
||||
self.Delete(uri)
|
||||
except gdata.service.RequestError, e:
|
||||
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
||||
|
||||
|
||||
def _bool2str(b):
|
||||
if b is None:
|
||||
return None
|
||||
return str(b is True).lower()
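# --- Illustrative usage sketch (not part of the original module) ---
# Assumes the client class above is gdata.apps.service.AppsService and that
# ProgrammaticLogin() is inherited from gdata.service.GDataService. The
# domain, credentials and names below are placeholders.
#
#   import gdata.apps.service
#
#   client = gdata.apps.service.AppsService(
#       email='admin@example.com', domain='example.com', password='secret')
#   client.ProgrammaticLogin()
#
#   client.CreateUser('jdoe', 'Doe', 'Jane', 'initial-password')
#   client.CreateEmailList('engineering')
#   client.AddRecipientToEmailList('jdoe@example.com', 'engineering')
#
#   # Page through every user in the domain.
#   for page in client.GetGeneratorForAllUsers():
#       for user_entry in page.entry:
#           pass  # e.g. inspect user_entry.login.user_name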
|
||||
39
python/gdata/apps_property.py
Normal file
@@ -0,0 +1,39 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2010 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# This module is used for version 2 of the Google Data APIs.
|
||||
|
||||
|
||||
"""Provides a base class to represent property elements in feeds.
|
||||
|
||||
This module is used for version 2 of the Google Data APIs. The primary class
|
||||
in this module is AppsProperty.
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'Vic Fryzel <vicfryzel@google.com>'
|
||||
|
||||
|
||||
import atom.core
|
||||
import gdata.apps
|
||||
|
||||
|
||||
class AppsProperty(atom.core.XmlElement):
|
||||
"""Represents an <apps:property> element in a feed."""
|
||||
_qname = gdata.apps.APPS_TEMPLATE % 'property'
|
||||
name = 'name'
|
||||
value = 'value'
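# --- Illustrative usage sketch (not part of the original module) ---
# Assumes atom.core.XmlElement accepts its declared attributes as keyword
# arguments and provides to_string(); the values below are placeholders.
#
#   import gdata.apps_property
#
#   prop = gdata.apps_property.AppsProperty(name='userEmail',
#                                           value='jdoe@example.com')
#   xml = prop.to_string()  # serializes an <apps:property> element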
|
||||
952
python/gdata/auth.py
Normal file
@@ -0,0 +1,952 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2007 - 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import cgi
|
||||
import math
|
||||
import random
|
||||
import re
|
||||
import time
|
||||
import types
|
||||
import urllib
|
||||
import atom.http_interface
|
||||
import atom.token_store
|
||||
import atom.url
|
||||
import gdata.oauth as oauth
|
||||
import gdata.oauth.rsa as oauth_rsa
|
||||
import gdata.tlslite.utils.keyfactory as keyfactory
|
||||
import gdata.tlslite.utils.cryptomath as cryptomath
|
||||
|
||||
import gdata.gauth
|
||||
|
||||
__author__ = 'api.jscudder (Jeff Scudder)'
|
||||
|
||||
|
||||
PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth='
|
||||
AUTHSUB_AUTH_LABEL = 'AuthSub token='
|
||||
|
||||
|
||||
"""This module provides functions and objects used with Google authentication.
|
||||
|
||||
Details on Google authorization mechanisms used with the Google Data APIs can
|
||||
be found here:
|
||||
http://code.google.com/apis/gdata/auth.html
|
||||
http://code.google.com/apis/accounts/
|
||||
|
||||
The essential functions are the following.
|
||||
Related to ClientLogin:
|
||||
generate_client_login_request_body: Constructs the body of an HTTP request to
|
||||
obtain a ClientLogin token for a specific
|
||||
service.
|
||||
extract_client_login_token: Creates a ClientLoginToken with the token from a
|
||||
success response to a ClientLogin request.
|
||||
get_captcha_challenge: If the server responded to the ClientLogin request
|
||||
with a CAPTCHA challenge, this method extracts the
|
||||
CAPTCHA URL and identifying CAPTCHA token.
|
||||
|
||||
Related to AuthSub:
|
||||
generate_auth_sub_url: Constructs a full URL for a AuthSub request. The
|
||||
user's browser must be sent to this Google Accounts
|
||||
URL and redirected back to the app to obtain the
|
||||
AuthSub token.
|
||||
extract_auth_sub_token_from_url: Once the user's browser has been
|
||||
redirected back to the web app, use this
|
||||
function to create an AuthSubToken with
|
||||
the correct authorization token and scope.
|
||||
token_from_http_body: Extracts the AuthSubToken value string from the
|
||||
server's response to an AuthSub session token upgrade
|
||||
request.
|
||||
"""
|
||||
|
||||
def generate_client_login_request_body(email, password, service, source,
|
||||
account_type='HOSTED_OR_GOOGLE', captcha_token=None,
|
||||
captcha_response=None):
|
||||
"""Creates the body of the autentication request
|
||||
|
||||
See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request
|
||||
for more details.
|
||||
|
||||
Args:
|
||||
email: str
|
||||
password: str
|
||||
service: str
|
||||
source: str
|
||||
account_type: str (optional) Default is 'HOSTED_OR_GOOGLE'; other valid
|
||||
values are 'GOOGLE' and 'HOSTED'
|
||||
captcha_token: str (optional)
|
||||
captcha_response: str (optional)
|
||||
|
||||
Returns:
|
||||
The HTTP body to send in a request for a client login token.
|
||||
"""
|
||||
return gdata.gauth.generate_client_login_request_body(email, password,
|
||||
service, source, account_type, captcha_token, captcha_response)
|
||||
|
||||
|
||||
GenerateClientLoginRequestBody = generate_client_login_request_body
|
||||
|
||||
|
||||
def GenerateClientLoginAuthToken(http_body):
|
||||
"""Returns the token value to use in Authorization headers.
|
||||
|
||||
Reads the token from the server's response to a Client Login request and
|
||||
creates header value to use in requests.
|
||||
|
||||
Args:
|
||||
http_body: str The body of the server's HTTP response to a Client Login
|
||||
request
|
||||
|
||||
Returns:
|
||||
The value half of an Authorization header.
|
||||
"""
|
||||
token = get_client_login_token(http_body)
|
||||
if token:
|
||||
return 'GoogleLogin auth=%s' % token
|
||||
return None
|
||||
|
||||
|
||||
def get_client_login_token(http_body):
|
||||
"""Returns the token value for a ClientLoginToken.
|
||||
|
||||
Reads the token from the server's response to a Client Login request and
|
||||
creates the token value string to use in requests.
|
||||
|
||||
Args:
|
||||
http_body: str The body of the server's HTTP response to a Client Login
|
||||
request
|
||||
|
||||
Returns:
|
||||
The token value string for a ClientLoginToken.
|
||||
"""
|
||||
return gdata.gauth.get_client_login_token_string(http_body)
|
||||
|
||||
|
||||
def extract_client_login_token(http_body, scopes):
|
||||
"""Parses the server's response and returns a ClientLoginToken.
|
||||
|
||||
Args:
|
||||
http_body: str The body of the server's HTTP response to a Client Login
|
||||
request. It is assumed that the login request was successful.
|
||||
scopes: list containing atom.url.Urls or strs. The scopes list contains
|
||||
all of the partial URLs under which the client login token is
|
||||
valid. For example, if scopes contains ['http://example.com/foo']
|
||||
then the client login token would be valid for
|
||||
http://example.com/foo/bar/baz
|
||||
|
||||
Returns:
|
||||
A ClientLoginToken which is valid for the specified scopes.
|
||||
"""
|
||||
token_string = get_client_login_token(http_body)
|
||||
token = ClientLoginToken(scopes=scopes)
|
||||
token.set_token_string(token_string)
|
||||
return token
|
||||
|
||||
|
||||
def get_captcha_challenge(http_body,
|
||||
captcha_base_url='http://www.google.com/accounts/'):
|
||||
"""Returns the URL and token for a CAPTCHA challenge issued by the server.
|
||||
|
||||
Args:
|
||||
http_body: str The body of the HTTP response from the server which
|
||||
contains the CAPTCHA challenge.
|
||||
captcha_base_url: str This function returns a full URL for viewing the
|
||||
challenge image which is built from the server's response. This
|
||||
base_url is used as the beginning of the URL because the server
|
||||
only provides the end of the URL. For example the server provides
|
||||
'Captcha?ctoken=Hi...N' and the URL for the image is
|
||||
'http://www.google.com/accounts/Captcha?ctoken=Hi...N'
|
||||
|
||||
Returns:
|
||||
A dictionary containing the information needed to respond to the CAPTCHA
|
||||
challenge, the image URL and the ID token of the challenge. The
|
||||
dictionary is in the form:
|
||||
{'token': string identifying the CAPTCHA image,
|
||||
'url': string containing the URL of the image}
|
||||
Returns None if there was no CAPTCHA challenge in the response.
|
||||
"""
|
||||
return gdata.gauth.get_captcha_challenge(http_body, captcha_base_url)
|
||||
|
||||
|
||||
GetCaptchaChallenge = get_captcha_challenge
|
||||
|
||||
|
||||
def GenerateOAuthRequestTokenUrl(
|
||||
oauth_input_params, scopes,
|
||||
request_token_url='https://www.google.com/accounts/OAuthGetRequestToken',
|
||||
extra_parameters=None):
|
||||
"""Generate a URL at which a request for OAuth request token is to be sent.
|
||||
|
||||
Args:
|
||||
oauth_input_params: OAuthInputParams OAuth input parameters.
|
||||
scopes: list of strings The URLs of the services to be accessed.
|
||||
request_token_url: string The beginning of the request token URL. This is
|
||||
normally 'https://www.google.com/accounts/OAuthGetRequestToken' or
|
||||
'/accounts/OAuthGetRequestToken'
|
||||
extra_parameters: dict (optional) key-value pairs as any additional
|
||||
parameters to be included in the URL and signature while making a
|
||||
request for fetching an OAuth request token. All the OAuth parameters
|
||||
are added by default. But if provided through this argument, any
|
||||
default parameters will be overwritten. For e.g. a default parameter
|
||||
oauth_version 1.0 can be overwritten if
|
||||
extra_parameters = {'oauth_version': '2.0'}
|
||||
|
||||
Returns:
|
||||
atom.url.Url OAuth request token URL.
|
||||
"""
|
||||
scopes_string = ' '.join([str(scope) for scope in scopes])
|
||||
parameters = {'scope': scopes_string}
|
||||
if extra_parameters:
|
||||
parameters.update(extra_parameters)
|
||||
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
|
||||
oauth_input_params.GetConsumer(), http_url=request_token_url,
|
||||
parameters=parameters)
|
||||
oauth_request.sign_request(oauth_input_params.GetSignatureMethod(),
|
||||
oauth_input_params.GetConsumer(), None)
|
||||
return atom.url.parse_url(oauth_request.to_url())
|
||||
|
||||
|
||||
def GenerateOAuthAuthorizationUrl(
|
||||
request_token,
|
||||
authorization_url='https://www.google.com/accounts/OAuthAuthorizeToken',
|
||||
callback_url=None, extra_params=None,
|
||||
include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope'):
|
||||
"""Generates URL at which user will login to authorize the request token.
|
||||
|
||||
Args:
|
||||
request_token: gdata.auth.OAuthToken OAuth request token.
|
||||
authorization_url: string The beginning of the authorization URL. This is
|
||||
normally 'https://www.google.com/accounts/OAuthAuthorizeToken' or
|
||||
'/accounts/OAuthAuthorizeToken'
|
||||
callback_url: string (optional) The URL user will be sent to after
|
||||
logging in and granting access.
|
||||
extra_params: dict (optional) Additional parameters to be sent.
|
||||
include_scopes_in_callback: Boolean (default=False) if set to True, and
|
||||
if 'callback_url' is present, the 'callback_url' will be modified to
|
||||
include the scope(s) from the request token as a URL parameter. The
|
||||
key for the 'callback' URL's scope parameter will be
|
||||
OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
|
||||
a parameter to the 'callback' URL, is that the page which receives
|
||||
the OAuth token will be able to tell which URLs the token grants
|
||||
access to.
|
||||
scopes_param_prefix: string (default='oauth_token_scope') The URL
|
||||
parameter key which maps to the list of valid scopes for the token.
|
||||
This URL parameter will be included in the callback URL along with
|
||||
the scopes of the token as value if include_scopes_in_callback=True.
|
||||
|
||||
Returns:
|
||||
atom.url.Url OAuth authorization URL.
|
||||
"""
|
||||
scopes = request_token.scopes
|
||||
if isinstance(scopes, list):
|
||||
scopes = ' '.join(scopes)
|
||||
if include_scopes_in_callback and callback_url:
|
||||
if callback_url.find('?') > -1:
|
||||
callback_url += '&'
|
||||
else:
|
||||
callback_url += '?'
|
||||
callback_url += urllib.urlencode({scopes_param_prefix:scopes})
|
||||
oauth_token = oauth.OAuthToken(request_token.key, request_token.secret)
|
||||
oauth_request = oauth.OAuthRequest.from_token_and_callback(
|
||||
token=oauth_token, callback=callback_url,
|
||||
http_url=authorization_url, parameters=extra_params)
|
||||
return atom.url.parse_url(oauth_request.to_url())
|
||||
|
||||
|
||||
def GenerateOAuthAccessTokenUrl(
|
||||
authorized_request_token,
|
||||
oauth_input_params,
|
||||
access_token_url='https://www.google.com/accounts/OAuthGetAccessToken',
|
||||
oauth_version='1.0',
|
||||
oauth_verifier=None):
|
||||
"""Generates URL at which user will login to authorize the request token.
|
||||
|
||||
Args:
|
||||
authorized_request_token: gdata.auth.OAuthToken OAuth authorized request
|
||||
token.
|
||||
oauth_input_params: OAuthInputParams OAuth input parameters.
|
||||
access_token_url: string The beginning of the access token URL. This is
|
||||
normally 'https://www.google.com/accounts/OAuthGetAccessToken' or
|
||||
'/accounts/OAuthGetAccessToken'
|
||||
oauth_version: str (default='1.0') oauth_version parameter.
|
||||
oauth_verifier: str (optional) If present, it is assumed that the client
|
||||
will use the OAuth v1.0a protocol which includes passing the
|
||||
oauth_verifier (as returned by the SP) in the access token step.
|
||||
|
||||
Returns:
|
||||
atom.url.Url OAuth access token URL.
|
||||
"""
|
||||
oauth_token = oauth.OAuthToken(authorized_request_token.key,
|
||||
authorized_request_token.secret)
|
||||
parameters = {'oauth_version': oauth_version}
|
||||
if oauth_verifier is not None:
|
||||
parameters['oauth_verifier'] = oauth_verifier
|
||||
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
|
||||
oauth_input_params.GetConsumer(), token=oauth_token,
|
||||
http_url=access_token_url, parameters=parameters)
|
||||
oauth_request.sign_request(oauth_input_params.GetSignatureMethod(),
|
||||
oauth_input_params.GetConsumer(), oauth_token)
|
||||
return atom.url.parse_url(oauth_request.to_url())
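# --- Illustrative three-legged OAuth sketch (not part of this module) ---
# The consumer key/secret, scope and callback below are placeholders;
# fetching the URLs and redirecting the user are left to the application.
#
#   import gdata.auth
#
#   params = gdata.auth.OAuthInputParams(
#       gdata.auth.OAuthSignatureMethod.HMAC_SHA1,
#       consumer_key='example.com', consumer_secret='consumer-secret')
#
#   # 1. URL for fetching an unauthorized request token.
#   req_url = gdata.auth.GenerateOAuthRequestTokenUrl(
#       params, scopes=['https://apps-apis.google.com/a/feeds/'])
#
#   # 2. Parse the response body into a token (gdata.auth.OAuthTokenFromHttpBody,
#   #    defined later in this file) and send the user's browser to
#   #    gdata.auth.GenerateOAuthAuthorizationUrl(request_token,
#   #        callback_url='https://example.com/oauth_callback').
#
#   # 3. After authorization, exchange the request token using the URL from
#   #    gdata.auth.GenerateOAuthAccessTokenUrl(request_token, params,
#   #        oauth_verifier=verifier).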
|
||||
|
||||
|
||||
def GenerateAuthSubUrl(next, scope, secure=False, session=True,
|
||||
request_url='https://www.google.com/accounts/AuthSubRequest',
|
||||
domain='default'):
|
||||
"""Generate a URL at which the user will login and be redirected back.
|
||||
|
||||
Users enter their credentials on a Google login page and a token is sent
|
||||
to the URL specified in next. See documentation for AuthSub login at:
|
||||
http://code.google.com/apis/accounts/AuthForWebApps.html
|
||||
|
||||
Args:
|
||||
request_url: str The beginning of the request URL. This is normally
|
||||
'http://www.google.com/accounts/AuthSubRequest' or
|
||||
'/accounts/AuthSubRequest'
|
||||
next: string The URL user will be sent to after logging in.
|
||||
scope: string The URL of the service to be accessed.
|
||||
secure: boolean (optional) Determines whether or not the issued token
|
||||
is a secure token.
|
||||
session: boolean (optional) Determines whether or not the issued token
|
||||
can be upgraded to a session token.
|
||||
domain: str (optional) The Google Apps domain for this account. If this
|
||||
is not a Google Apps account, use 'default' which is the default
|
||||
value.
|
||||
"""
|
||||
# Translate True/False values for parameters into numeric values accepted
|
||||
# by the AuthSub service.
|
||||
if secure:
|
||||
secure = 1
|
||||
else:
|
||||
secure = 0
|
||||
|
||||
if session:
|
||||
session = 1
|
||||
else:
|
||||
session = 0
|
||||
|
||||
request_params = urllib.urlencode({'next': next, 'scope': scope,
|
||||
'secure': secure, 'session': session,
|
||||
'hd': domain})
|
||||
if request_url.find('?') == -1:
|
||||
return '%s?%s' % (request_url, request_params)
|
||||
else:
|
||||
# The request URL already contained url parameters so we should add
|
||||
# the parameters using the & separator.
|
||||
return '%s&%s' % (request_url, request_params)
|
||||
|
||||
|
||||
def generate_auth_sub_url(next, scopes, secure=False, session=True,
|
||||
request_url='https://www.google.com/accounts/AuthSubRequest',
|
||||
domain='default', scopes_param_prefix='auth_sub_scopes'):
|
||||
"""Constructs a URL string for requesting a multiscope AuthSub token.
|
||||
|
||||
The generated token will contain a URL parameter to pass along the
|
||||
requested scopes to the next URL. When the Google Accounts page
|
||||
redirects the browser to the 'next' URL, it appends the single-use
|
||||
AuthSub token value to the URL as a URL parameter with the key 'token'.
|
||||
However, the information about which scopes were requested is not
|
||||
included by Google Accounts. This method adds the scopes to the next
|
||||
URL before making the request so that the redirect will be sent to
|
||||
a page, and both the token value and the list of scopes can be
|
||||
extracted from the request URL.
|
||||
|
||||
Args:
|
||||
next: atom.url.URL or string The URL user will be sent to after
|
||||
authorizing this web application to access their data.
|
||||
scopes: list containing strings The URLs of the services to be accessed.
|
||||
secure: boolean (optional) Determines whether or not the issued token
|
||||
is a secure token.
|
||||
session: boolean (optional) Determines whether or not the issued token
|
||||
can be upgraded to a session token.
|
||||
request_url: atom.url.Url or str The beginning of the request URL. This
|
||||
is normally 'http://www.google.com/accounts/AuthSubRequest' or
|
||||
'/accounts/AuthSubRequest'
|
||||
domain: The domain which the account is part of. This is used for Google
|
||||
Apps accounts, the default value is 'default' which means that the
|
||||
requested account is a Google Account (@gmail.com for example)
|
||||
scopes_param_prefix: str (optional) The requested scopes are added as a
|
||||
URL parameter to the next URL so that the page at the 'next' URL can
|
||||
extract the token value and the valid scopes from the URL. The key
|
||||
for the URL parameter defaults to 'auth_sub_scopes'
|
||||
|
||||
Returns:
|
||||
An atom.url.Url which the user's browser should be directed to in order
|
||||
to authorize this application to access their information.
|
||||
"""
|
||||
if isinstance(next, (str, unicode)):
|
||||
next = atom.url.parse_url(next)
|
||||
scopes_string = ' '.join([str(scope) for scope in scopes])
|
||||
next.params[scopes_param_prefix] = scopes_string
|
||||
|
||||
if isinstance(request_url, (str, unicode)):
|
||||
request_url = atom.url.parse_url(request_url)
|
||||
request_url.params['next'] = str(next)
|
||||
request_url.params['scope'] = scopes_string
|
||||
if session:
|
||||
request_url.params['session'] = 1
|
||||
else:
|
||||
request_url.params['session'] = 0
|
||||
if secure:
|
||||
request_url.params['secure'] = 1
|
||||
else:
|
||||
request_url.params['secure'] = 0
|
||||
request_url.params['hd'] = domain
|
||||
return request_url
|
||||
|
||||
|
||||
def AuthSubTokenFromUrl(url):
|
||||
"""Extracts the AuthSub token from the URL.
|
||||
|
||||
Used after the AuthSub redirect has sent the user to the 'next' page and
|
||||
appended the token to the URL. This function returns the value to be used
|
||||
in the Authorization header.
|
||||
|
||||
Args:
|
||||
url: str The URL of the current page which contains the AuthSub token as
|
||||
a URL parameter.
|
||||
"""
|
||||
token = TokenFromUrl(url)
|
||||
if token:
|
||||
return 'AuthSub token=%s' % token
|
||||
return None
|
||||
|
||||
|
||||
def TokenFromUrl(url):
|
||||
"""Extracts the AuthSub token from the URL.
|
||||
|
||||
Returns the raw token value.
|
||||
|
||||
Args:
|
||||
url: str The URL or the query portion of the URL string (after the ?) of
|
||||
the current page which contains the AuthSub token as a URL parameter.
|
||||
"""
|
||||
if url.find('?') > -1:
|
||||
query_params = url.split('?')[1]
|
||||
else:
|
||||
query_params = url
|
||||
for pair in query_params.split('&'):
|
||||
if pair.startswith('token='):
|
||||
return pair[6:]
|
||||
return None
|
||||
|
||||
|
||||
def extract_auth_sub_token_from_url(url,
|
||||
scopes_param_prefix='auth_sub_scopes', rsa_key=None):
|
||||
"""Creates an AuthSubToken and sets the token value and scopes from the URL.
|
||||
|
||||
After the Google Accounts AuthSub pages redirect the user's broswer back to
|
||||
the web application (using the 'next' URL from the request) the web app must
|
||||
extract the token from the current page's URL. The token is provided as a
|
||||
URL parameter named 'token' and if generate_auth_sub_url was used to create
|
||||
the request, the token's valid scopes are included in a URL parameter whose
|
||||
name is specified in scopes_param_prefix.
|
||||
|
||||
Args:
|
||||
url: atom.url.Url or str representing the current URL. The token value
|
||||
and valid scopes should be included as URL parameters.
|
||||
scopes_param_prefix: str (optional) The URL parameter key which maps to
|
||||
the list of valid scopes for the token.
|
||||
|
||||
Returns:
|
||||
An AuthSubToken with the token value from the URL and set to be valid for
|
||||
the scopes passed in on the URL. If no scopes were included in the URL,
|
||||
the AuthSubToken defaults to being valid for no scopes. If there was no
|
||||
'token' parameter in the URL, this function returns None.
|
||||
"""
|
||||
if isinstance(url, (str, unicode)):
|
||||
url = atom.url.parse_url(url)
|
||||
if 'token' not in url.params:
|
||||
return None
|
||||
scopes = []
|
||||
if scopes_param_prefix in url.params:
|
||||
scopes = url.params[scopes_param_prefix].split(' ')
|
||||
token_value = url.params['token']
|
||||
if rsa_key:
|
||||
token = SecureAuthSubToken(rsa_key, scopes=scopes)
|
||||
else:
|
||||
token = AuthSubToken(scopes=scopes)
|
||||
token.set_token_string(token_value)
|
||||
return token
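# --- Illustrative AuthSub sketch (not part of the original module) ---
# URLs, scope and the example token value below are placeholders.
#
#   import gdata.auth
#
#   # Before login: build the URL the user's browser is sent to.
#   auth_url = gdata.auth.generate_auth_sub_url(
#       'https://example.com/after_login',
#       scopes=['https://apps-apis.google.com/a/feeds/'])
#
#   # After Google Accounts redirects back to the 'next' page:
#   current_url = ('https://example.com/after_login?token=EXAMPLE_TOKEN'
#                  '&auth_sub_scopes=https://apps-apis.google.com/a/feeds/')
#   token = gdata.auth.extract_auth_sub_token_from_url(current_url)
#   # token.auth_header now holds 'AuthSub token=EXAMPLE_TOKEN'.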
|
||||
|
||||
|
||||
def AuthSubTokenFromHttpBody(http_body):
|
||||
"""Extracts the AuthSub token from an HTTP body string.
|
||||
|
||||
Used to find the new session token after making a request to upgrade a
|
||||
single use AuthSub token.
|
||||
|
||||
Args:
|
||||
http_body: str The response from the server which contains the AuthSub
|
||||
key. For example, this function would find the new session token
|
||||
from the server's response to an upgrade token request.
|
||||
|
||||
Returns:
|
||||
The header value to use for Authorization which contains the AuthSub
|
||||
token.
|
||||
"""
|
||||
token_value = token_from_http_body(http_body)
|
||||
if token_value:
|
||||
return '%s%s' % (AUTHSUB_AUTH_LABEL, token_value)
|
||||
return None
|
||||
|
||||
|
||||
def token_from_http_body(http_body):
|
||||
"""Extracts the AuthSub token from an HTTP body string.
|
||||
|
||||
Used to find the new session token after making a request to upgrade a
|
||||
single use AuthSub token.
|
||||
|
||||
Args:
|
||||
http_body: str The response from the server which contains the AuthSub
|
||||
key. For example, this function would find the new session token
|
||||
from the server's response to an upgrade token request.
|
||||
|
||||
Returns:
|
||||
The raw token value to use in an AuthSubToken object.
|
||||
"""
|
||||
for response_line in http_body.splitlines():
|
||||
if response_line.startswith('Token='):
|
||||
# Strip off Token= and return the token value string.
|
||||
return response_line[6:]
|
||||
return None
|
||||
|
||||
|
||||
TokenFromHttpBody = token_from_http_body
|
||||
|
||||
|
||||
def OAuthTokenFromUrl(url, scopes_param_prefix='oauth_token_scope'):
|
||||
"""Creates an OAuthToken and sets token key and scopes (if present) from URL.
|
||||
|
||||
After the Google Accounts OAuth pages redirect the user's broswer back to
|
||||
the web application (using the 'callback' URL from the request) the web app
|
||||
can extract the token from the current page's URL. The token is same as the
|
||||
request token, but it is either authorized (if user grants access) or
|
||||
unauthorized (if user denies access). The token is provided as a
|
||||
URL parameter named 'oauth_token' and if it was chosen to use
|
||||
GenerateOAuthAuthorizationUrl with include_scopes_in_callback=True, the token's
|
||||
valid scopes are included in a URL parameter whose name is specified in
|
||||
scopes_param_prefix.
|
||||
|
||||
Args:
|
||||
url: atom.url.Url or str representing the current URL. The token value
|
||||
and valid scopes should be included as URL parameters.
|
||||
scopes_param_prefix: str (optional) The URL parameter key which maps to
|
||||
the list of valid scopes for the token.
|
||||
|
||||
Returns:
|
||||
An OAuthToken with the token key from the URL and set to be valid for
|
||||
the scopes passed in on the URL. If no scopes were included in the URL,
|
||||
the OAuthToken defaults to being valid for no scopes. If there was no
|
||||
'oauth_token' parameter in the URL, this function returns None.
|
||||
"""
|
||||
if isinstance(url, (str, unicode)):
|
||||
url = atom.url.parse_url(url)
|
||||
if 'oauth_token' not in url.params:
|
||||
return None
|
||||
scopes = []
|
||||
if scopes_param_prefix in url.params:
|
||||
scopes = url.params[scopes_param_prefix].split(' ')
|
||||
token_key = url.params['oauth_token']
|
||||
token = OAuthToken(key=token_key, scopes=scopes)
|
||||
return token
|
||||
|
||||
|
||||
def OAuthTokenFromHttpBody(http_body):
|
||||
"""Parses the HTTP response body and returns an OAuth token.
|
||||
|
||||
The returned OAuth token will just have key and secret parameters set.
|
||||
It won't have any knowledge about the scopes or oauth_input_params. It is
|
||||
your responsibility to make it aware of the remaining parameters.
|
||||
|
||||
Returns:
|
||||
OAuthToken OAuth token.
|
||||
"""
|
||||
token = oauth.OAuthToken.from_string(http_body)
|
||||
oauth_token = OAuthToken(key=token.key, secret=token.secret)
|
||||
return oauth_token
|
||||
|
||||
|
||||
class OAuthSignatureMethod(object):
|
||||
"""Holds valid OAuth signature methods.
|
||||
|
||||
RSA_SHA1: Class to build signature according to RSA-SHA1 algorithm.
|
||||
HMAC_SHA1: Class to build signature according to HMAC-SHA1 algorithm.
|
||||
"""
|
||||
|
||||
HMAC_SHA1 = oauth.OAuthSignatureMethod_HMAC_SHA1
|
||||
|
||||
class RSA_SHA1(oauth_rsa.OAuthSignatureMethod_RSA_SHA1):
|
||||
"""Provides implementation for abstract methods to return RSA certs."""
|
||||
|
||||
def __init__(self, private_key, public_cert):
|
||||
self.private_key = private_key
|
||||
self.public_cert = public_cert
|
||||
|
||||
def _fetch_public_cert(self, unused_oauth_request):
|
||||
return self.public_cert
|
||||
|
||||
def _fetch_private_cert(self, unused_oauth_request):
|
||||
return self.private_key
|
||||
|
||||
|
||||
class OAuthInputParams(object):
|
||||
"""Stores OAuth input parameters.
|
||||
|
||||
This class is a store for OAuth input parameters viz. consumer key and secret,
|
||||
signature method and RSA key.
|
||||
"""
|
||||
|
||||
def __init__(self, signature_method, consumer_key, consumer_secret=None,
|
||||
rsa_key=None, requestor_id=None):
|
||||
"""Initializes object with parameters required for using OAuth mechanism.
|
||||
|
||||
NOTE: Though consumer_secret and rsa_key are optional, either of the two
|
||||
is required depending on the value of the signature_method.
|
||||
|
||||
Args:
|
||||
signature_method: class which provides implementation for strategy class
|
||||
oauth.oauth.OAuthSignatureMethod. Signature method to be used for
|
||||
signing each request. Valid implementations are provided as the
|
||||
constants defined by gdata.auth.OAuthSignatureMethod. Currently
|
||||
they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
|
||||
gdata.auth.OAuthSignatureMethod.HMAC_SHA1. Instead of passing in
|
||||
the strategy class, you may pass in a string for 'RSA_SHA1' or
|
||||
'HMAC_SHA1'. If you plan to use OAuth on App Engine (or another
|
||||
WSGI environment) I recommend specifying signature method using a
|
||||
string (the only options are 'RSA_SHA1' and 'HMAC_SHA1'). In these
|
||||
environments there are sometimes issues with pickling an object in
|
||||
which a member references a class or function. Storing a string to
|
||||
refer to the signature method mitigates complications when
|
||||
pickling.
|
||||
consumer_key: string Domain identifying third_party web application.
|
||||
consumer_secret: string (optional) Secret generated during registration.
|
||||
Required only for HMAC_SHA1 signature method.
|
||||
rsa_key: string (optional) Private key required for RSA_SHA1 signature
|
||||
method.
|
||||
requestor_id: string (optional) User email address to make requests on
|
||||
their behalf. This parameter should only be set when performing
|
||||
2 legged OAuth requests.
|
||||
"""
|
||||
if (signature_method == OAuthSignatureMethod.RSA_SHA1
|
||||
or signature_method == 'RSA_SHA1'):
|
||||
self.__signature_strategy = 'RSA_SHA1'
|
||||
elif (signature_method == OAuthSignatureMethod.HMAC_SHA1
|
||||
or signature_method == 'HMAC_SHA1'):
|
||||
self.__signature_strategy = 'HMAC_SHA1'
|
||||
else:
|
||||
self.__signature_strategy = signature_method
|
||||
self.rsa_key = rsa_key
|
||||
self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
|
||||
self.requestor_id = requestor_id
|
||||
|
||||
def __get_signature_method(self):
|
||||
if self.__signature_strategy == 'RSA_SHA1':
|
||||
return OAuthSignatureMethod.RSA_SHA1(self.rsa_key, None)
|
||||
elif self.__signature_strategy == 'HMAC_SHA1':
|
||||
return OAuthSignatureMethod.HMAC_SHA1()
|
||||
else:
|
||||
return self.__signature_strategy()
|
||||
|
||||
def __set_signature_method(self, signature_method):
|
||||
if (signature_method == OAuthSignatureMethod.RSA_SHA1
|
||||
or signature_method == 'RSA_SHA1'):
|
||||
self.__signature_strategy = 'RSA_SHA1'
|
||||
elif (signature_method == OAuthSignatureMethod.HMAC_SHA1
|
||||
or signature_method == 'HMAC_SHA1'):
|
||||
self.__signature_strategy = 'HMAC_SHA1'
|
||||
else:
|
||||
self.__signature_strategy = signature_method
|
||||
|
||||
_signature_method = property(__get_signature_method, __set_signature_method,
|
||||
doc="""Returns object capable of signing the request using RSA of HMAC.
|
||||
|
||||
Replaces the _signature_method member to avoid pickle errors.""")
|
||||
|
||||
def GetSignatureMethod(self):
|
||||
"""Gets the OAuth signature method.
|
||||
|
||||
Returns:
|
||||
object of supertype <oauth.oauth.OAuthSignatureMethod>
|
||||
"""
|
||||
return self._signature_method
|
||||
|
||||
def GetConsumer(self):
|
||||
"""Gets the OAuth consumer.
|
||||
|
||||
Returns:
|
||||
object of type <oauth.oauth.Consumer>
|
||||
"""
|
||||
return self._consumer
|
||||
|
||||
|
||||
class ClientLoginToken(atom.http_interface.GenericToken):
|
||||
"""Stores the Authorization header in auth_header and adds to requests.
|
||||
|
||||
This token will add its Authorization header to an HTTP request
|
||||
as it is made. This token class is simple, but
|
||||
some Token classes must calculate portions of the Authorization header
|
||||
based on the request being made, which is why the token is responsible
|
||||
for making requests via an http_client parameter.
|
||||
|
||||
Args:
|
||||
auth_header: str The value for the Authorization header.
|
||||
scopes: list of str or atom.url.Url specifying the beginnings of URLs
|
||||
for which this token can be used. For example, if scopes contains
|
||||
'http://example.com/foo', then this token can be used for a request to
|
||||
'http://example.com/foo/bar' but it cannot be used for a request to
|
||||
'http://example.com/baz'
|
||||
"""
|
||||
def __init__(self, auth_header=None, scopes=None):
|
||||
self.auth_header = auth_header
|
||||
self.scopes = scopes or []
|
||||
|
||||
def __str__(self):
|
||||
return self.auth_header
|
||||
|
||||
def perform_request(self, http_client, operation, url, data=None,
|
||||
headers=None):
|
||||
"""Sets the Authorization header and makes the HTTP request."""
|
||||
if headers is None:
|
||||
headers = {'Authorization':self.auth_header}
|
||||
else:
|
||||
headers['Authorization'] = self.auth_header
|
||||
return http_client.request(operation, url, data=data, headers=headers)
|
||||
|
||||
def get_token_string(self):
|
||||
"""Removes PROGRAMMATIC_AUTH_LABEL to give just the token value."""
|
||||
return self.auth_header[len(PROGRAMMATIC_AUTH_LABEL):]
|
||||
|
||||
def set_token_string(self, token_string):
|
||||
self.auth_header = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token_string)
|
||||
|
||||
def valid_for_scope(self, url):
|
||||
"""Tells the caller if the token authorizes access to the desired URL.
|
||||
"""
|
||||
if isinstance(url, (str, unicode)):
|
||||
url = atom.url.parse_url(url)
|
||||
for scope in self.scopes:
|
||||
if scope == atom.token_store.SCOPE_ALL:
|
||||
return True
|
||||
if isinstance(scope, (str, unicode)):
|
||||
scope = atom.url.parse_url(scope)
|
||||
if scope == url:
|
||||
return True
|
||||
# Check the host and the path, but ignore the port and protocol.
|
||||
elif scope.host == url.host and not scope.path:
|
||||
return True
|
||||
elif scope.host == url.host and scope.path and not url.path:
|
||||
continue
|
||||
elif scope.host == url.host and url.path.startswith(scope.path):
|
||||
return True
|
||||
return False
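# --- Illustrative scope-matching sketch (not part of the original module) ---
# Scopes act as host plus path-prefix filters; port and protocol are ignored.
# The header value and scope URLs below are placeholders.
#
#   import gdata.auth
#
#   token = gdata.auth.ClientLoginToken(
#       auth_header='GoogleLogin auth=EXAMPLE',
#       scopes=['http://example.com/foo'])
#   token.valid_for_scope('http://example.com/foo/bar')  # True
#   token.valid_for_scope('http://example.com/baz')      # False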
|
||||
|
||||
|
||||
class AuthSubToken(ClientLoginToken):
|
||||
def get_token_string(self):
|
||||
"""Removes AUTHSUB_AUTH_LABEL to give just the token value."""
|
||||
return self.auth_header[len(AUTHSUB_AUTH_LABEL):]
|
||||
|
||||
def set_token_string(self, token_string):
|
||||
self.auth_header = '%s%s' % (AUTHSUB_AUTH_LABEL, token_string)
|
||||
|
||||
|
||||
class OAuthToken(atom.http_interface.GenericToken):
|
||||
"""Stores the token key, token secret and scopes for which token is valid.
|
||||
|
||||
This token adds the authorization header to each request made. It
|
||||
re-calculates authorization header for every request since the OAuth
|
||||
signature to be added to the authorization header is dependent on the
|
||||
request parameters.
|
||||
|
||||
Attributes:
|
||||
key: str The value for the OAuth token i.e. token key.
|
||||
secret: str The value for the OAuth token secret.
|
||||
scopes: list of str or atom.url.Url specifying the beginnings of URLs
|
||||
for which this token can be used. For example, if scopes contains
|
||||
'http://example.com/foo', then this token can be used for a request to
|
||||
'http://example.com/foo/bar' but it cannot be used for a request to
|
||||
'http://example.com/baz'
|
||||
oauth_input_params: OAuthInputParams OAuth input parameters.
|
||||
"""
|
||||
|
||||
def __init__(self, key=None, secret=None, scopes=None,
|
||||
oauth_input_params=None):
|
||||
self.key = key
|
||||
self.secret = secret
|
||||
self.scopes = scopes or []
|
||||
self.oauth_input_params = oauth_input_params
|
||||
|
||||
def __str__(self):
|
||||
return self.get_token_string()
|
||||
|
||||
def get_token_string(self):
|
||||
"""Returns the token string.
|
||||
|
||||
The token string returned is of format
|
||||
oauth_token=[0]&oauth_token_secret=[1], where [0] and [1] are some strings.
|
||||
|
||||
Returns:
|
||||
A token string of format oauth_token=[0]&oauth_token_secret=[1],
|
||||
where [0] and [1] are some strings. If self.secret is absent, it just
|
||||
returns oauth_token=[0]. If self.key is absent, it just returns
|
||||
oauth_token_secret=[1]. If both are absent, it returns None.
|
||||
"""
|
||||
if self.key and self.secret:
|
||||
return urllib.urlencode({'oauth_token': self.key,
|
||||
'oauth_token_secret': self.secret})
|
||||
elif self.key:
|
||||
return 'oauth_token=%s' % self.key
|
||||
elif self.secret:
|
||||
return 'oauth_token_secret=%s' % self.secret
|
||||
else:
|
||||
return None
|
||||
|
||||
def set_token_string(self, token_string):
|
||||
"""Sets the token key and secret from the token string.
|
||||
|
||||
Args:
|
||||
token_string: str Token string of form
|
||||
oauth_token=[0]&oauth_token_secret=[1]. If oauth_token is not present,
|
||||
self.key will be None. If oauth_token_secret is not present,
|
||||
self.secret will be None.
|
||||
"""
|
||||
token_params = cgi.parse_qs(token_string, keep_blank_values=False)
|
||||
if 'oauth_token' in token_params:
|
||||
self.key = token_params['oauth_token'][0]
|
||||
if 'oauth_token_secret' in token_params:
|
||||
self.secret = token_params['oauth_token_secret'][0]
|
||||
|
||||
def GetAuthHeader(self, http_method, http_url, realm=''):
|
||||
"""Get the authentication header.
|
||||
|
||||
Args:
|
||||
http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc.
|
||||
http_url: string or atom.url.Url HTTP URL to which request is made.
|
||||
realm: string (default='') realm parameter to be included in the
|
||||
authorization header.
|
||||
|
||||
Returns:
|
||||
dict Header to be sent with every subsequent request after
|
||||
authentication.
|
||||
"""
|
||||
if isinstance(http_url, types.StringTypes):
|
||||
http_url = atom.url.parse_url(http_url)
|
||||
header = None
|
||||
token = None
|
||||
if self.key or self.secret:
|
||||
token = oauth.OAuthToken(self.key, self.secret)
|
||||
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
|
||||
self.oauth_input_params.GetConsumer(), token=token,
|
||||
http_url=str(http_url), http_method=http_method,
|
||||
parameters=http_url.params)
|
||||
oauth_request.sign_request(self.oauth_input_params.GetSignatureMethod(),
|
||||
self.oauth_input_params.GetConsumer(), token)
|
||||
header = oauth_request.to_header(realm=realm)
|
||||
header['Authorization'] = header['Authorization'].replace('+', '%2B')
|
||||
return header
|
||||
|
||||
def perform_request(self, http_client, operation, url, data=None,
|
||||
headers=None):
|
||||
"""Sets the Authorization header and makes the HTTP request."""
|
||||
if not headers:
|
||||
headers = {}
|
||||
if self.oauth_input_params.requestor_id:
|
||||
url.params['xoauth_requestor_id'] = self.oauth_input_params.requestor_id
|
||||
headers.update(self.GetAuthHeader(operation, url))
|
||||
return http_client.request(operation, url, data=data, headers=headers)
|
||||
|
||||
def valid_for_scope(self, url):
|
||||
if isinstance(url, (str, unicode)):
|
||||
url = atom.url.parse_url(url)
|
||||
for scope in self.scopes:
|
||||
if scope == atom.token_store.SCOPE_ALL:
|
||||
return True
|
||||
if isinstance(scope, (str, unicode)):
|
||||
scope = atom.url.parse_url(scope)
|
||||
if scope == url:
|
||||
return True
|
||||
# Check the host and the path, but ignore the port and protocol.
|
||||
elif scope.host == url.host and not scope.path:
|
||||
return True
|
||||
elif scope.host == url.host and scope.path and not url.path:
|
||||
continue
|
||||
elif scope.host == url.host and url.path.startswith(scope.path):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class SecureAuthSubToken(AuthSubToken):
|
||||
"""Stores the rsa private key, token, and scopes for the secure AuthSub token.
|
||||
|
||||
This token adds the authorization header to each request made. It
|
||||
re-calculates authorization header for every request since the secure AuthSub
|
||||
signature to be added to the authorization header is dependent on the
|
||||
request parameters.
|
||||
|
||||
Attributes:
|
||||
rsa_key: string The RSA private key in PEM format that the token will
|
||||
use to sign requests
|
||||
token_string: string (optional) The value for the AuthSub token.
|
||||
scopes: list of str or atom.url.Url specifying the beginnings of URLs
|
||||
for which this token can be used. For example, if scopes contains
|
||||
'http://example.com/foo', then this token can be used for a request to
|
||||
'http://example.com/foo/bar' but it cannot be used for a request to
|
||||
'http://example.com/baz'
|
||||
"""
|
||||
|
||||
def __init__(self, rsa_key, token_string=None, scopes=None):
|
||||
self.rsa_key = keyfactory.parsePEMKey(rsa_key)
|
||||
self.token_string = token_string or ''
|
||||
self.scopes = scopes or []
|
||||
|
||||
def __str__(self):
|
||||
return self.get_token_string()
|
||||
|
||||
def get_token_string(self):
|
||||
return str(self.token_string)
|
||||
|
||||
def set_token_string(self, token_string):
|
||||
self.token_string = token_string
|
||||
|
||||
def GetAuthHeader(self, http_method, http_url):
|
||||
"""Generates the Authorization header.
|
||||
|
||||
The form of the secure AuthSub Authorization header is
|
||||
Authorization: AuthSub token="token" sigalg="sigalg" data="data" sig="sig"
|
||||
and data represents a string in the form
|
||||
data = http_method http_url timestamp nonce
|
||||
|
||||
Args:
|
||||
http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc.
|
||||
http_url: string or atom.url.Url HTTP URL to which request is made.
|
||||
|
||||
Returns:
|
||||
dict Header to be sent with every subsequent request after authentication.
|
||||
"""
|
||||
timestamp = int(math.floor(time.time()))
|
||||
nonce = '%lu' % random.randrange(1, 2**64)
|
||||
data = '%s %s %d %s' % (http_method, str(http_url), timestamp, nonce)
|
||||
sig = cryptomath.bytesToBase64(self.rsa_key.hashAndSign(data))
|
||||
header = {'Authorization': '%s"%s" data="%s" sig="%s" sigalg="rsa-sha1"' %
|
||||
(AUTHSUB_AUTH_LABEL, self.token_string, data, sig)}
|
||||
return header
|
||||
|
||||
def perform_request(self, http_client, operation, url, data=None,
|
||||
headers=None):
|
||||
"""Sets the Authorization header and makes the HTTP request."""
|
||||
if not headers:
|
||||
headers = {}
|
||||
headers.update(self.GetAuthHeader(operation, url))
|
||||
return http_client.request(operation, url, data=data, headers=headers)
|
||||
697
python/gdata/base/__init__.py
Normal file
@@ -0,0 +1,697 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2006 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains extensions to Atom objects used with Google Base."""
|
||||
|
||||
|
||||
__author__ = 'api.jscudder (Jeffrey Scudder)'
|
||||
|
||||
|
||||
try:
|
||||
from xml.etree import cElementTree as ElementTree
|
||||
except ImportError:
|
||||
try:
|
||||
import cElementTree as ElementTree
|
||||
except ImportError:
|
||||
try:
|
||||
from xml.etree import ElementTree
|
||||
except ImportError:
|
||||
from elementtree import ElementTree
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
# XML namespaces which are often used in Google Base entities.
|
||||
GBASE_NAMESPACE = 'http://base.google.com/ns/1.0'
|
||||
GBASE_TEMPLATE = '{http://base.google.com/ns/1.0}%s'
|
||||
GMETA_NAMESPACE = 'http://base.google.com/ns-metadata/1.0'
|
||||
GMETA_TEMPLATE = '{http://base.google.com/ns-metadata/1.0}%s'
|
||||
|
||||
|
||||
class ItemAttributeContainer(atom.AtomBase):
|
||||
"""Provides methods for finding Google Base Item attributes.
|
||||
|
||||
Google Base item attributes are child nodes in the gbase namespace. Google
|
||||
Base allows you to define your own item attributes and this class provides
|
||||
methods to interact with the custom attributes.
|
||||
"""
|
||||
|
||||
def GetItemAttributes(self, name):
|
||||
"""Returns a list of all item attributes which have the desired name.
|
||||
|
||||
Args:
|
||||
name: str The tag of the desired base attributes. For example, calling
|
||||
this method with 'rating' would return a list of ItemAttributes
|
||||
represented by a 'g:rating' tag.
|
||||
|
||||
Returns:
|
||||
A list of matching ItemAttribute objects.
|
||||
"""
|
||||
result = []
|
||||
for attrib in self.item_attributes:
|
||||
if attrib.name == name:
|
||||
result.append(attrib)
|
||||
return result
|
||||
|
||||
def FindItemAttribute(self, name):
|
||||
"""Get the contents of the first Base item attribute which matches name.
|
||||
|
||||
This method is deprecated, please use GetItemAttributes instead.
|
||||
|
||||
Args:
|
||||
name: str The tag of the desired base attribute. For example, calling
|
||||
this method with name = 'rating' would search for a tag rating
|
||||
in the GBase namespace in the item attributes.
|
||||
|
||||
Returns:
|
||||
The text contents of the item attribute, or none if the attribute was
|
||||
not found.
|
||||
"""
|
||||
|
||||
for attrib in self.item_attributes:
|
||||
if attrib.name == name:
|
||||
return attrib.text
|
||||
return None
|
||||
|
||||
def AddItemAttribute(self, name, value, value_type=None, access=None):
|
||||
"""Adds a new item attribute tag containing the value.
|
||||
|
||||
Creates a new extension element in the GBase namespace to represent a
|
||||
Google Base item attribute.
|
||||
|
||||
Args:
|
||||
name: str The tag name for the new attribute. This must be a valid xml
|
||||
tag name. The tag will be placed in the GBase namespace.
|
||||
value: str Contents for the item attribute
|
||||
value_type: str (optional) The type of data in the value. Examples: text,
|
||||
float
|
||||
access: str (optional) Used to hide attributes. The attribute is not
|
||||
exposed in the snippets feed if access is set to 'private'.
|
||||
"""
|
||||
|
||||
new_attribute = ItemAttribute(name, text=value,
|
||||
text_type=value_type, access=access)
|
||||
self.item_attributes.append(new_attribute)
|
||||
return new_attribute
|
||||
|
||||
def SetItemAttribute(self, name, value):
|
||||
"""Changes an existing item attribute's value."""
|
||||
|
||||
for attrib in self.item_attributes:
|
||||
if attrib.name == name:
|
||||
attrib.text = value
|
||||
return
|
||||
|
||||
def RemoveItemAttribute(self, name):
|
||||
"""Deletes the first extension element which matches name.
|
||||
|
||||
Deletes the first extension element which matches name.
|
||||
"""
|
||||
|
||||
for i in xrange(len(self.item_attributes)):
|
||||
if self.item_attributes[i].name == name:
|
||||
del self.item_attributes[i]
|
||||
return
|
||||
|
||||
# We need to overwrite _ConvertElementTreeToMember to add special logic to
|
||||
# convert custom attributes to members
|
||||
def _ConvertElementTreeToMember(self, child_tree):
|
||||
# Find the element's tag in this class's list of child members
|
||||
if self.__class__._children.has_key(child_tree.tag):
|
||||
member_name = self.__class__._children[child_tree.tag][0]
|
||||
member_class = self.__class__._children[child_tree.tag][1]
|
||||
# If the class member is supposed to contain a list, make sure the
|
||||
# matching member is set to a list, then append the new member
|
||||
# instance to the list.
|
||||
if isinstance(member_class, list):
|
||||
if getattr(self, member_name) is None:
|
||||
setattr(self, member_name, [])
|
||||
getattr(self, member_name).append(atom._CreateClassFromElementTree(
|
||||
member_class[0], child_tree))
|
||||
else:
|
||||
setattr(self, member_name,
|
||||
atom._CreateClassFromElementTree(member_class, child_tree))
|
||||
elif child_tree.tag.find('{%s}' % GBASE_NAMESPACE) == 0:
|
||||
# If this is in the gbase namespace, make it into an extension element.
|
||||
name = child_tree.tag[child_tree.tag.index('}')+1:]
|
||||
value = child_tree.text
|
||||
if child_tree.attrib.has_key('type'):
|
||||
value_type = child_tree.attrib['type']
|
||||
else:
|
||||
value_type = None
|
||||
attrib=self.AddItemAttribute(name, value, value_type)
|
||||
for sub in child_tree.getchildren():
|
||||
sub_name = sub.tag[sub.tag.index('}')+1:]
|
||||
sub_value=sub.text
|
||||
if sub.attrib.has_key('type'):
|
||||
sub_type = sub.attrib['type']
|
||||
else:
|
||||
sub_type=None
|
||||
attrib.AddItemAttribute(sub_name, sub_value, sub_type)
|
||||
else:
|
||||
atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)
|
||||
|
||||
# We need to overwrite _AddMembersToElementTree to add special logic to
|
||||
# convert custom members to XML nodes.
|
||||
def _AddMembersToElementTree(self, tree):
|
||||
# Convert the members of this class which are XML child nodes.
|
||||
# This uses the class's _children dictionary to find the members which
|
||||
# should become XML child nodes.
|
||||
member_node_names = [values[0] for tag, values in
|
||||
self.__class__._children.iteritems()]
|
||||
for member_name in member_node_names:
|
||||
member = getattr(self, member_name)
|
||||
if member is None:
|
||||
pass
|
||||
elif isinstance(member, list):
|
||||
for instance in member:
|
||||
instance._BecomeChildElement(tree)
|
||||
else:
|
||||
member._BecomeChildElement(tree)
|
||||
# Convert the members of this class which are XML attributes.
|
||||
for xml_attribute, member_name in self.__class__._attributes.iteritems():
|
||||
member = getattr(self, member_name)
|
||||
if member is not None:
|
||||
tree.attrib[xml_attribute] = member
|
||||
# Convert all special custom item attributes to nodes
|
||||
for attribute in self.item_attributes:
|
||||
attribute._BecomeChildElement(tree)
|
||||
# Lastly, call the ExtensionContainers's _AddMembersToElementTree to
|
||||
# convert any extension attributes.
|
||||
atom.ExtensionContainer._AddMembersToElementTree(self, tree)
|
||||
|
||||
|
||||
class ItemAttribute(ItemAttributeContainer):
|
||||
"""An optional or user defined attribute for a GBase item.
|
||||
|
||||
Google Base allows items to have custom attribute child nodes. These nodes
|
||||
have contents and a type attribute which tells Google Base whether the
|
||||
contents are text, a float value with units, etc. The Atom text class has
|
||||
the same structure, so this class reuses atom.Text's children and attributes.
|
||||
"""
|
||||
|
||||
_namespace = GBASE_NAMESPACE
|
||||
_children = atom.Text._children.copy()
|
||||
_attributes = atom.Text._attributes.copy()
|
||||
_attributes['access'] = 'access'
|
||||
|
||||
def __init__(self, name, text_type=None, access=None, text=None,
|
||||
extension_elements=None, extension_attributes=None, item_attributes=None):
|
||||
"""Constructor for a GBase item attribute
|
||||
|
||||
Args:
|
||||
name: str The name of the attribute. Examples include
|
||||
price, color, make, model, pages, salary, etc.
|
||||
text_type: str (optional) The type associated with the text contents
|
||||
access: str (optional) If the access attribute is set to 'private', the
|
||||
attribute will not be included in the item's description in the
|
||||
snippets feed
|
||||
text: str (optional) The text data in this element
|
||||
extension_elements: list (optional) A list of ExtensionElement
|
||||
instances
|
||||
extension_attributes: dict (optional) A dictionary of attribute
|
||||
value string pairs
|
||||
"""
|
||||
|
||||
self.name = name
|
||||
self.type = text_type
|
||||
self.access = access
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
self.item_attributes = item_attributes or []
|
||||
|
||||
def _BecomeChildElement(self, tree):
|
||||
new_child = ElementTree.Element('')
|
||||
tree.append(new_child)
|
||||
new_child.tag = '{%s}%s' % (self.__class__._namespace,
|
||||
self.name)
|
||||
self._AddMembersToElementTree(new_child)
|
||||
|
||||
def _ToElementTree(self):
|
||||
new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
|
||||
self.name))
|
||||
self._AddMembersToElementTree(new_tree)
|
||||
return new_tree
|
||||
|
||||
|
||||
def ItemAttributeFromString(xml_string):
|
||||
element_tree = ElementTree.fromstring(xml_string)
|
||||
return _ItemAttributeFromElementTree(element_tree)
|
||||
|
||||
|
||||
def _ItemAttributeFromElementTree(element_tree):
|
||||
if element_tree.tag.find(GBASE_TEMPLATE % '') == 0:
|
||||
to_return = ItemAttribute('')
|
||||
to_return._HarvestElementTree(element_tree)
|
||||
to_return.name = element_tree.tag[element_tree.tag.index('}')+1:]
|
||||
if to_return.name and to_return.name != '':
|
||||
return to_return
|
||||
return None
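# Illustrative sketch (added): parsing a single custom attribute with
# ItemAttributeFromString. The XML snippet is a made-up g:price attribute.
def _item_attribute_parse_example():
  xml = ('<g:price type="floatUnit" '
         'xmlns:g="http://base.google.com/ns/1.0">9.99 usd</g:price>')
  attribute = ItemAttributeFromString(xml)
  # Expected result: attribute.name == 'price', attribute.type == 'floatUnit'
  # and attribute.text == '9.99 usd'.
  return attribute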
class Label(atom.AtomBase):
|
||||
"""The Google Base label element"""
|
||||
|
||||
_tag = 'label'
|
||||
_namespace = GBASE_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
def __init__(self, text=None, extension_elements=None,
|
||||
extension_attributes=None):
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def LabelFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Label, xml_string)
|
||||
|
||||
|
||||
class Thumbnail(atom.AtomBase):
|
||||
"""The Google Base thumbnail element"""
|
||||
|
||||
_tag = 'thumbnail'
|
||||
_namespace = GMETA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['width'] = 'width'
|
||||
_attributes['height'] = 'height'
|
||||
|
||||
def __init__(self, width=None, height=None, text=None, extension_elements=None,
|
||||
extension_attributes=None):
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
self.width = width
|
||||
self.height = height
|
||||
|
||||
|
||||
def ThumbnailFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Thumbnail, xml_string)
|
||||
|
||||
|
||||
class ImageLink(atom.Text):
|
||||
"""The Google Base image_link element"""
|
||||
|
||||
_tag = 'image_link'
|
||||
_namespace = GBASE_NAMESPACE
|
||||
_children = atom.Text._children.copy()
|
||||
_attributes = atom.Text._attributes.copy()
|
||||
_children['{%s}thumbnail' % GMETA_NAMESPACE] = ('thumbnail', [Thumbnail])
|
||||
|
||||
def __init__(self, thumbnail=None, text=None, extension_elements=None,
|
||||
text_type=None, extension_attributes=None):
|
||||
self.thumbnail = thumbnail or []
|
||||
self.text = text
|
||||
self.type = text_type
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def ImageLinkFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(ImageLink, xml_string)
|
||||
|
||||
|
||||
class ItemType(atom.Text):
|
||||
"""The Google Base item_type element"""
|
||||
|
||||
_tag = 'item_type'
|
||||
_namespace = GBASE_NAMESPACE
|
||||
_children = atom.Text._children.copy()
|
||||
_attributes = atom.Text._attributes.copy()
|
||||
|
||||
def __init__(self, text=None, extension_elements=None,
|
||||
text_type=None, extension_attributes=None):
|
||||
self.text = text
|
||||
self.type = text_type
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def ItemTypeFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(ItemType, xml_string)
|
||||
|
||||
|
||||
class MetaItemType(ItemType):
|
||||
"""The Google Base item_type element"""
|
||||
|
||||
_tag = 'item_type'
|
||||
_namespace = GMETA_NAMESPACE
|
||||
_children = ItemType._children.copy()
|
||||
_attributes = ItemType._attributes.copy()
|
||||
|
||||
|
||||
def MetaItemTypeFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(MetaItemType, xml_string)
|
||||
|
||||
|
||||
class Value(atom.AtomBase):
|
||||
"""Metadata about common values for a given attribute
|
||||
|
||||
A value is a child of an attribute which comes from the attributes feed.
|
||||
The value's text is a commonly used value paired with an attribute name
|
||||
and the value's count tells how often this value appears for the given
|
||||
attribute in the search results.
|
||||
"""
|
||||
|
||||
_tag = 'value'
|
||||
_namespace = GMETA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['count'] = 'count'
|
||||
|
||||
def __init__(self, count=None, text=None, extension_elements=None,
|
||||
extension_attributes=None):
|
||||
"""Constructor for Attribute metadata element
|
||||
|
||||
Args:
|
||||
count: str (optional) The number of times the value in text is given
|
||||
for the parent attribute.
|
||||
text: str (optional) The value which appears in the search results.
|
||||
extension_elements: list (optional) A list of ExtensionElement
|
||||
instances
|
||||
extension_attributes: dict (optional) A dictionary of attribute value
|
||||
string pairs
|
||||
"""
|
||||
|
||||
self.count = count
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def ValueFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Value, xml_string)
|
||||
|
||||
|
||||
class Attribute(atom.Text):
|
||||
"""Metadata about an attribute from the attributes feed
|
||||
|
||||
An entry from the attributes feed contains a list of attributes. Each
|
||||
attribute describes the attribute's type and count of the items which
|
||||
use the attribute.
|
||||
"""
|
||||
|
||||
_tag = 'attribute'
|
||||
_namespace = GMETA_NAMESPACE
|
||||
_children = atom.Text._children.copy()
|
||||
_attributes = atom.Text._attributes.copy()
|
||||
_children['{%s}value' % GMETA_NAMESPACE] = ('value', [Value])
|
||||
_attributes['count'] = 'count'
|
||||
_attributes['name'] = 'name'
|
||||
|
||||
def __init__(self, name=None, attribute_type=None, count=None, value=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
"""Constructor for Attribute metadata element
|
||||
|
||||
Args:
|
||||
name: str (optional) The name of the attribute
|
||||
attribute_type: str (optional) The type for the attribute. Examples:
|
||||
text, float, etc.
|
||||
count: str (optional) The number of times this attribute appears in
|
||||
the query results.
|
||||
value: list (optional) The values which are often used for this
|
||||
attribute.
|
||||
text: str (optional) The text contents of the XML for this attribute.
|
||||
extension_elements: list (optional) A list of ExtensionElement
|
||||
instances
|
||||
extension_attributes: dict (optional) A dictionary of attribute value
|
||||
string pairs
|
||||
"""
|
||||
|
||||
self.name = name
|
||||
self.type = attribute_type
|
||||
self.count = count
|
||||
self.value = value or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def AttributeFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Attribute, xml_string)
|
||||
|
||||
|
||||
class Attributes(atom.AtomBase):
|
||||
"""A collection of Google Base metadata attributes"""
|
||||
|
||||
_tag = 'attributes'
|
||||
_namespace = GMETA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])
|
||||
|
||||
def __init__(self, attribute=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.attribute = attribute or []
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
self.text = text
|
||||
|
||||
|
||||
class GBaseItem(ItemAttributeContainer, gdata.BatchEntry):
|
||||
"""An Google Base flavor of an Atom Entry.
|
||||
|
||||
Google Base items have required attributes, recommended attributes, and user
|
||||
defined attributes. The required attributes are stored in this class as
|
||||
members, and other attributes are stored as extension elements. You can
|
||||
access the recommended and user defined attributes by using
|
||||
AddItemAttribute, SetItemAttribute, FindItemAttribute, and
|
||||
RemoveItemAttribute.
|
||||
|
||||
The Base Item
|
||||
"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.BatchEntry._children.copy()
|
||||
_attributes = gdata.BatchEntry._attributes.copy()
|
||||
_children['{%s}label' % GBASE_NAMESPACE] = ('label', [Label])
|
||||
_children['{%s}item_type' % GBASE_NAMESPACE] = ('item_type', ItemType)
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
contributor=None, atom_id=None, link=None, published=None, rights=None,
|
||||
source=None, summary=None, title=None, updated=None, control=None,
|
||||
label=None, item_type=None, item_attributes=None,
|
||||
batch_operation=None, batch_id=None, batch_status=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
self.author = author or []
|
||||
self.category = category or []
|
||||
self.content = content
|
||||
self.contributor = contributor or []
|
||||
self.id = atom_id
|
||||
self.link = link or []
|
||||
self.published = published
|
||||
self.rights = rights
|
||||
self.source = source
|
||||
self.summary = summary
|
||||
self.title = title
|
||||
self.updated = updated
|
||||
self.control = control
|
||||
self.label = label or []
|
||||
self.item_type = item_type
|
||||
self.item_attributes = item_attributes or []
|
||||
self.batch_operation = batch_operation
|
||||
self.batch_id = batch_id
|
||||
self.batch_status = batch_status
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def GBaseItemFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseItem, xml_string)
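# Illustrative sketch (added): building a GBaseItem in code and attaching a
# couple of custom attributes before serializing it. All values are
# placeholders.
def _gbase_item_example():
  item = GBaseItem()
  item.title = atom.Title(text='Digital camera')
  item.item_type = ItemType(text='Products')
  item.AddItemAttribute('price', '99.99 usd', value_type='floatUnit')
  item.AddItemAttribute('condition', 'new')
  return item.ToString()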
class GBaseSnippet(GBaseItem):
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = GBaseItem._children.copy()
|
||||
_attributes = GBaseItem._attributes.copy()
|
||||
|
||||
|
||||
def GBaseSnippetFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseSnippet, xml_string)
|
||||
|
||||
|
||||
class GBaseAttributeEntry(gdata.GDataEntry):
|
||||
"""An Atom Entry from the attributes feed"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
contributor=None, atom_id=None, link=None, published=None, rights=None,
|
||||
source=None, summary=None, title=None, updated=None, label=None,
|
||||
attribute=None, control=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
self.author = author or []
|
||||
self.category = category or []
|
||||
self.content = content
|
||||
self.contributor = contributor or []
|
||||
self.id = atom_id
|
||||
self.link = link or []
|
||||
self.published = published
|
||||
self.rights = rights
|
||||
self.source = source
|
||||
self.summary = summary
|
||||
self.control = control
|
||||
self.title = title
|
||||
self.updated = updated
|
||||
self.label = label or []
|
||||
self.attribute = attribute or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def GBaseAttributeEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseAttributeEntry, xml_string)
|
||||
|
||||
|
||||
class GBaseItemTypeEntry(gdata.GDataEntry):
|
||||
"""An Atom entry from the item types feed
|
||||
|
||||
These entries contain a list of attributes which are stored in one
|
||||
XML node called attributes. This class simplifies the data structure
|
||||
by treating attributes as a list of attribute instances.
|
||||
|
||||
Note that the item_type for an item type entry is in the Google Base meta
|
||||
namespace as opposed to item_types encountered in other feeds.
|
||||
"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}attributes' % GMETA_NAMESPACE] = ('attributes', Attributes)
|
||||
_children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])
|
||||
_children['{%s}item_type' % GMETA_NAMESPACE] = ('item_type', MetaItemType)
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
contributor=None, atom_id=None, link=None, published=None, rights=None,
|
||||
source=None, summary=None, title=None, updated=None, label=None,
|
||||
item_type=None, control=None, attribute=None, attributes=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
self.author = author or []
|
||||
self.category = category or []
|
||||
self.content = content
|
||||
self.contributor = contributor or []
|
||||
self.id = atom_id
|
||||
self.link = link or []
|
||||
self.published = published
|
||||
self.rights = rights
|
||||
self.source = source
|
||||
self.summary = summary
|
||||
self.title = title
|
||||
self.updated = updated
|
||||
self.control = control
|
||||
self.label = label or []
|
||||
self.item_type = item_type
|
||||
self.attributes = attributes
|
||||
self.attribute = attribute or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
def GBaseItemTypeEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseItemTypeEntry, xml_string)
|
||||
|
||||
|
||||
class GBaseItemFeed(gdata.BatchFeed):
|
||||
"""A feed containing Google Base Items"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.BatchFeed._children.copy()
|
||||
_attributes = gdata.BatchFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItem])
|
||||
|
||||
|
||||
def GBaseItemFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseItemFeed, xml_string)
|
||||
|
||||
|
||||
class GBaseSnippetFeed(gdata.GDataFeed):
|
||||
"""A feed containing Google Base Snippets"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseSnippet])
|
||||
|
||||
|
||||
def GBaseSnippetFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseSnippetFeed, xml_string)
|
||||
|
||||
|
||||
class GBaseAttributesFeed(gdata.GDataFeed):
|
||||
"""A feed containing Google Base Attributes
|
||||
|
||||
A query sent to the attributes feed will return a feed of
|
||||
attributes which are present in the items that match the
|
||||
query.
|
||||
"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
|
||||
[GBaseAttributeEntry])
|
||||
|
||||
|
||||
def GBaseAttributesFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseAttributesFeed, xml_string)
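# Illustrative sketch (added): reading attribute metadata out of a parsed
# attributes feed. The XML string is assumed to come from the
# /base/feeds/attributes feed.
def _attributes_feed_example(feed_xml_string):
  feed = GBaseAttributesFeedFromString(feed_xml_string)
  summary = []
  for entry in feed.entry:
    for attribute in entry.attribute:
      # Each Attribute carries a name, a type and a list of common values.
      values = [value.text for value in attribute.value]
      summary.append((attribute.name, attribute.type, values))
  return summary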
class GBaseLocalesFeed(gdata.GDataFeed):
|
||||
"""The locales feed from Google Base.
|
||||
|
||||
This read-only feed defines the permitted locales for Google Base. The
|
||||
locale value identifies the language, currency, and date formats used in a
|
||||
feed.
|
||||
"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
|
||||
|
||||
def GBaseLocalesFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseLocalesFeed, xml_string)
|
||||
|
||||
|
||||
class GBaseItemTypesFeed(gdata.GDataFeed):
|
||||
"""A feed from the Google Base item types feed"""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItemTypeEntry])
|
||||
|
||||
|
||||
def GBaseItemTypesFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GBaseItemTypesFeed, xml_string)
|
||||
256
python/gdata/base/service.py
Normal file
@@ -0,0 +1,256 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2006 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""GBaseService extends the GDataService to streamline Google Base operations.
|
||||
|
||||
GBaseService: Provides methods to query feeds and manipulate items. Extends
|
||||
GDataService.
|
||||
|
||||
DictionaryToParamList: Function which converts a dictionary into a list of
|
||||
URL arguments (represented as strings). This is a
|
||||
utility function used in CRUD operations.
|
||||
"""
|
||||
|
||||
__author__ = 'api.jscudder (Jeffrey Scudder)'
|
||||
|
||||
import urllib
|
||||
import gdata
|
||||
import atom.service
|
||||
import gdata.service
|
||||
import gdata.base
|
||||
import atom
|
||||
|
||||
|
||||
# URL to which all batch requests are sent.
|
||||
BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch'
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class RequestError(Error):
|
||||
pass
|
||||
|
||||
|
||||
class GBaseService(gdata.service.GDataService):
|
||||
"""Client for the Google Base service."""
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server='base.google.com', api_key=None, additional_headers=None,
|
||||
handler=None, **kwargs):
|
||||
"""Creates a client for the Google Base service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'base.google.com'.
|
||||
api_key: string (optional) The Google Base API key to use.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service='gbase', source=source,
|
||||
server=server, additional_headers=additional_headers, handler=handler,
|
||||
**kwargs)
|
||||
self.api_key = api_key
|
||||
|
||||
def _SetAPIKey(self, api_key):
|
||||
if not isinstance(self.additional_headers, dict):
|
||||
self.additional_headers = {}
|
||||
self.additional_headers['X-Google-Key'] = api_key
|
||||
|
||||
def __SetAPIKey(self, api_key):
|
||||
self._SetAPIKey(api_key)
|
||||
|
||||
def _GetAPIKey(self):
|
||||
if 'X-Google-Key' not in self.additional_headers:
|
||||
return None
|
||||
else:
|
||||
return self.additional_headers['X-Google-Key']
|
||||
|
||||
def __GetAPIKey(self):
|
||||
return self._GetAPIKey()
|
||||
|
||||
api_key = property(__GetAPIKey, __SetAPIKey,
|
||||
doc="""Get or set the API key to be included in all requests.""")
|
||||
|
||||
def Query(self, uri, converter=None):
|
||||
"""Performs a style query and returns a resulting feed or entry.
|
||||
|
||||
Args:
|
||||
uri: string The full URI to be queried. Examples include
|
||||
'/base/feeds/snippets?bq=digital+camera',
|
||||
'http://www.google.com/base/feeds/snippets?bq=digital+camera'
|
||||
'/base/feeds/items'
|
||||
I recommend creating a URI using a query class.
|
||||
converter: func (optional) A function which will be executed on the
|
||||
server's response. Examples include GBaseItemFromString, etc.
|
||||
|
||||
Returns:
|
||||
If converter was specified, returns the results of calling converter on
|
||||
the server's response. If converter was not specified, and the result
|
||||
was an Atom Entry, returns a GBaseItem. Otherwise, the method returns
|
||||
the result of calling gdata.service's Get method.
|
||||
"""
|
||||
|
||||
result = self.Get(uri, converter=converter)
|
||||
if converter:
|
||||
return result
|
||||
elif isinstance(result, atom.Entry):
|
||||
return gdata.base.GBaseItemFromString(result.ToString())
|
||||
return result
|
||||
|
||||
def QuerySnippetsFeed(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseSnippetFeedFromString)
|
||||
|
||||
def QueryItemsFeed(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseItemFeedFromString)
|
||||
|
||||
def QueryAttributesFeed(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseAttributesFeedFromString)
|
||||
|
||||
def QueryItemTypesFeed(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseItemTypesFeedFromString)
|
||||
|
||||
def QueryLocalesFeed(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseLocalesFeedFromString)
|
||||
|
||||
def GetItem(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseItemFromString)
|
||||
|
||||
def GetSnippet(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseSnippetFromString)
|
||||
|
||||
def GetAttribute(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseAttributeEntryFromString)
|
||||
|
||||
def GetItemType(self, uri):
|
||||
return self.Get(uri, converter=gdata.base.GBaseItemTypeEntryFromString)
|
||||
|
||||
def GetLocale(self, uri):
|
||||
return self.Get(uri, converter=gdata.GDataEntryFromString)
|
||||
|
||||
def InsertItem(self, new_item, url_params=None, escape_params=True,
|
||||
converter=None):
|
||||
"""Adds an item to Google Base.
|
||||
|
||||
Args:
|
||||
new_item: atom.Entry or subclass A new item which is to be added to
|
||||
Google Base.
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
converter: func (optional) Function which is executed on the server's
|
||||
response before it is returned. Usually this is a function like
|
||||
GBaseItemFromString which will parse the response and turn it into
|
||||
an object.
|
||||
|
||||
Returns:
|
||||
If converter is defined, the results of running converter on the server's
|
||||
response. Otherwise, it will be a GBaseItem.
|
||||
"""
|
||||
|
||||
response = self.Post(new_item, '/base/feeds/items', url_params=url_params,
|
||||
escape_params=escape_params, converter=converter)
|
||||
|
||||
if not converter and isinstance(response, atom.Entry):
|
||||
return gdata.base.GBaseItemFromString(response.ToString())
|
||||
return response
|
||||
|
||||
def DeleteItem(self, item_id, url_params=None, escape_params=True):
|
||||
"""Removes an item with the specified ID from Google Base.
|
||||
|
||||
Args:
|
||||
item_id: string The ID of the item to be deleted. Example:
|
||||
'http://www.google.com/base/feeds/items/13185446517496042648'
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the deletion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
True if the delete succeeded.
|
||||
"""
|
||||
|
||||
return self.Delete('%s' % (item_id[len('http://www.google.com'):],),
|
||||
url_params=url_params, escape_params=escape_params)
|
||||
|
||||
def UpdateItem(self, item_id, updated_item, url_params=None,
|
||||
escape_params=True,
|
||||
converter=gdata.base.GBaseItemFromString):
|
||||
"""Updates an existing item.
|
||||
|
||||
Args:
|
||||
item_id: string The ID of the item to be updated. Example:
|
||||
'http://www.google.com/base/feeds/items/13185446517496042648'
|
||||
updated_item: atom.Entry, subclass, or string, containing
|
||||
the Atom Entry which will replace the base item which is
|
||||
stored at the item_id.
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the update request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
converter: func (optional) Function which is executed on the server's
|
||||
response before it is returned. Usually this is a function like
|
||||
GBaseItemFromString which will parse the response and turn it into
|
||||
an object.
|
||||
|
||||
Returns:
|
||||
If converter is defined, the results of running converter on the server's
|
||||
response. Otherwise, it will be a GBaseItem.
|
||||
"""
|
||||
|
||||
response = self.Put(updated_item,
|
||||
item_id, url_params=url_params, escape_params=escape_params,
|
||||
converter=converter)
|
||||
if not converter and isinstance(response, atom.Entry):
|
||||
return gdata.base.GBaseItemFromString(response.ToString())
|
||||
return response
|
||||
|
||||
def ExecuteBatch(self, batch_feed,
|
||||
converter=gdata.base.GBaseItemFeedFromString):
|
||||
"""Sends a batch request feed to the server.
|
||||
|
||||
Args:
|
||||
batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which
|
||||
contain the desired CRUD operation and any necessary entry data.
|
||||
converter: Function (optional) Function to be executed on the server's
|
||||
response. This function should take one string as a parameter. The
|
||||
default value is GBaseItemFeedFromString which will turn the result
|
||||
into a gdata.base.GBaseItem object.
|
||||
|
||||
Returns:
|
||||
A gdata.BatchFeed containing the results.
|
||||
"""
|
||||
|
||||
return self.Post(batch_feed, BASE_BATCH_URL, converter=converter)
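# Illustrative usage sketch (added): a typical login-and-query round trip with
# GBaseService. Credentials and the developer key are placeholders;
# ProgrammaticLogin is the ClientLogin helper inherited from
# gdata.service.GDataService.
def _gbase_service_example(email, password, developer_key):
  client = GBaseService(email=email, password=password,
                        source='exampleCo-exampleApp-1',
                        api_key=developer_key)
  client.ProgrammaticLogin()
  # Query the public snippets feed; the bq parameter holds the query text.
  feed = client.QuerySnippetsFeed('/base/feeds/snippets?bq=digital+camera')
  return feed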
class BaseQuery(gdata.service.Query):
|
||||
|
||||
def _GetBaseQuery(self):
|
||||
return self['bq']
|
||||
|
||||
def _SetBaseQuery(self, base_query):
|
||||
self['bq'] = base_query
|
||||
|
||||
bq = property(_GetBaseQuery, _SetBaseQuery,
|
||||
doc="""The bq query parameter""")
202
python/gdata/blogger/__init__.py
Normal file
@@ -0,0 +1,202 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2007, 2008 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Contains extensions to Atom objects used with Blogger."""
|
||||
|
||||
|
||||
__author__ = 'api.jscudder (Jeffrey Scudder)'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
import re
|
||||
|
||||
|
||||
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
|
||||
THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0'
|
||||
|
||||
|
||||
class BloggerEntry(gdata.GDataEntry):
|
||||
"""Adds convenience methods inherited by all Blogger entries."""
|
||||
|
||||
blog_name_pattern = re.compile('(http://)(\w*)')
|
||||
blog_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
|
||||
blog_id2_pattern = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
|
||||
|
||||
def GetBlogId(self):
|
||||
"""Extracts the Blogger id of this blog.
|
||||
This method is useful when constructing URLs by hand. The blog id is
|
||||
often used in blogger operation URLs. This should not be confused with
|
||||
the id member of a BloggerBlog. The id element is the Atom id XML element.
|
||||
The blog id which this method returns is a part of the Atom id.
|
||||
|
||||
Returns:
|
||||
The blog's unique id as a string.
|
||||
"""
|
||||
if self.id.text:
|
||||
match = self.blog_id_pattern.match(self.id.text)
|
||||
if match:
|
||||
return match.group(2)
|
||||
else:
|
||||
return self.blog_id2_pattern.match(self.id.text).group(2)
|
||||
return None
|
||||
|
||||
def GetBlogName(self):
|
||||
"""Finds the name of this blog as used in the 'alternate' URL.
|
||||
An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
|
||||
entry representing the above example, this method would return 'blogName'.
|
||||
|
||||
Returns:
|
||||
The blog's URL name component as a string.
|
||||
"""
|
||||
for link in self.link:
|
||||
if link.rel == 'alternate':
|
||||
return self.blog_name_pattern.match(link.href).group(2)
|
||||
return None
|
||||
|
||||
|
||||
class BlogEntry(BloggerEntry):
|
||||
"""Describes a blog entry in the feed listing a user's blogs."""
|
||||
|
||||
|
||||
def BlogEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BlogEntry, xml_string)
|
||||
|
||||
|
||||
class BlogFeed(gdata.GDataFeed):
|
||||
"""Describes a feed of a user's blogs."""
|
||||
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogEntry])
|
||||
|
||||
|
||||
def BlogFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BlogFeed, xml_string)
|
||||
|
||||
|
||||
class BlogPostEntry(BloggerEntry):
|
||||
"""Describes a blog post entry in the feed of a blog's posts."""
|
||||
|
||||
post_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
|
||||
|
||||
def AddLabel(self, label):
|
||||
"""Adds a label to the blog post.
|
||||
|
||||
The label is represented by an Atom category element, so this method
|
||||
is shorthand for appending a new atom.Category object.
|
||||
|
||||
Args:
|
||||
label: str
|
||||
"""
|
||||
self.category.append(atom.Category(scheme=LABEL_SCHEME, term=label))
|
||||
|
||||
def GetPostId(self):
|
||||
"""Extracts the postID string from the entry's Atom id.
|
||||
|
||||
Returns: A string of digits which identify this post within the blog.
|
||||
"""
|
||||
if self.id.text:
|
||||
return self.post_id_pattern.match(self.id.text).group(4)
|
||||
return None
|
||||
|
||||
|
||||
def BlogPostEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BlogPostEntry, xml_string)
|
||||
|
||||
|
||||
class BlogPostFeed(gdata.GDataFeed):
|
||||
"""Describes a feed of a blog's posts."""
|
||||
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogPostEntry])
|
||||
|
||||
|
||||
def BlogPostFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(BlogPostFeed, xml_string)
|
||||
|
||||
|
||||
class InReplyTo(atom.AtomBase):
|
||||
_tag = 'in-reply-to'
|
||||
_namespace = THR_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['href'] = 'href'
|
||||
_attributes['ref'] = 'ref'
|
||||
_attributes['source'] = 'source'
|
||||
_attributes['type'] = 'type'
|
||||
|
||||
def __init__(self, href=None, ref=None, source=None, type=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
self.href = href
|
||||
self.ref = ref
|
||||
self.source = source
|
||||
self.type = type
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
self.text = text
|
||||
|
||||
|
||||
def InReplyToFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(InReplyTo, xml_string)
|
||||
|
||||
|
||||
class CommentEntry(BloggerEntry):
|
||||
"""Describes a blog post comment entry in the feed of a blog post's
|
||||
comments."""
|
||||
|
||||
_children = BloggerEntry._children.copy()
|
||||
_children['{%s}in-reply-to' % THR_NAMESPACE] = ('in_reply_to', InReplyTo)
|
||||
|
||||
comment_id_pattern = re.compile('.*-(\w*)$')
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
contributor=None, atom_id=None, link=None, published=None, rights=None,
|
||||
source=None, summary=None, control=None, title=None, updated=None,
|
||||
in_reply_to=None, extension_elements=None, extension_attributes=None,
|
||||
text=None):
|
||||
BloggerEntry.__init__(self, author=author, category=category,
|
||||
content=content, contributor=contributor, atom_id=atom_id, link=link,
|
||||
published=published, rights=rights, source=source, summary=summary,
|
||||
control=control, title=title, updated=updated,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
self.in_reply_to = in_reply_to
|
||||
|
||||
def GetCommentId(self):
|
||||
"""Extracts the commentID string from the entry's Atom id.
|
||||
|
||||
Returns: A string of digits which identify this comment within the blog post.
|
||||
"""
|
||||
if self.id.text:
|
||||
return self.comment_id_pattern.match(self.id.text).group(1)
|
||||
return None
|
||||
|
||||
|
||||
def CommentEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(CommentEntry, xml_string)
|
||||
|
||||
|
||||
class CommentFeed(gdata.GDataFeed):
|
||||
"""Describes a feed of a blog post's comments."""
|
||||
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CommentEntry])
|
||||
|
||||
|
||||
def CommentFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(CommentFeed, xml_string)
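# Illustrative sketch (added): working with a parsed posts feed once it has
# been fetched (for example via gdata.blogger.service). The argument is
# assumed to be the Atom XML of a Blogger posts feed.
def _blog_post_feed_example(feed_xml_string):
  feed = BlogPostFeedFromString(feed_xml_string)
  ids = []
  for post in feed.entry:
    # Each entry is a BlogPostEntry; GetPostId pulls the numeric post id out
    # of the Atom id element.
    ids.append(post.GetPostId())
  return ids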
175
python/gdata/blogger/client.py
Normal file
@@ -0,0 +1,175 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Contains a client to communicate with the Blogger servers.
|
||||
|
||||
For documentation on the Blogger API, see:
|
||||
http://code.google.com/apis/blogger/
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import gdata.client
|
||||
import gdata.gauth
|
||||
import gdata.blogger.data
|
||||
import atom.data
|
||||
import atom.http_core
|
||||
|
||||
|
||||
# List user's blogs, takes a user ID, or 'default'.
|
||||
BLOGS_URL = 'http://www.blogger.com/feeds/%s/blogs'
|
||||
# Takes a blog ID.
|
||||
BLOG_POST_URL = 'http://www.blogger.com/feeds/%s/posts/default'
|
||||
# Takes a blog ID.
|
||||
BLOG_PAGE_URL = 'http://www.blogger.com/feeds/%s/pages/default'
|
||||
# Takes a blog ID and post ID.
|
||||
BLOG_POST_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/%s/comments/default'
|
||||
# Takes a blog ID.
|
||||
BLOG_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/comments/default'
|
||||
# Takes a blog ID.
|
||||
BLOG_ARCHIVE_URL = 'http://www.blogger.com/feeds/%s/archive/full'
|
||||
|
||||
|
||||
class BloggerClient(gdata.client.GDClient):
|
||||
api_version = '2'
|
||||
auth_service = 'blogger'
|
||||
auth_scopes = gdata.gauth.AUTH_SCOPES['blogger']
|
||||
|
||||
def get_blogs(self, user_id='default', auth_token=None,
|
||||
desired_class=gdata.blogger.data.BlogFeed, **kwargs):
|
||||
return self.get_feed(BLOGS_URL % user_id, auth_token=auth_token,
|
||||
desired_class=desired_class, **kwargs)
|
||||
|
||||
GetBlogs = get_blogs
|
||||
|
||||
def get_posts(self, blog_id, auth_token=None,
|
||||
desired_class=gdata.blogger.data.BlogPostFeed, query=None,
|
||||
**kwargs):
|
||||
return self.get_feed(BLOG_POST_URL % blog_id, auth_token=auth_token,
|
||||
desired_class=desired_class, query=query, **kwargs)
|
||||
|
||||
GetPosts = get_posts
|
||||
|
||||
def get_pages(self, blog_id, auth_token=None,
|
||||
desired_class=gdata.blogger.data.BlogPageFeed, query=None,
|
||||
**kwargs):
|
||||
return self.get_feed(BLOG_PAGE_URL % blog_id, auth_token=auth_token,
|
||||
desired_class=desired_class, query=query, **kwargs)
|
||||
|
||||
GetPages = get_pages
|
||||
|
||||
def get_post_comments(self, blog_id, post_id, auth_token=None,
|
||||
desired_class=gdata.blogger.data.CommentFeed,
|
||||
query=None, **kwargs):
|
||||
return self.get_feed(BLOG_POST_COMMENTS_URL % (blog_id, post_id),
|
||||
auth_token=auth_token, desired_class=desired_class,
|
||||
query=query, **kwargs)
|
||||
|
||||
GetPostComments = get_post_comments
|
||||
|
||||
def get_blog_comments(self, blog_id, auth_token=None,
|
||||
desired_class=gdata.blogger.data.CommentFeed,
|
||||
query=None, **kwargs):
|
||||
return self.get_feed(BLOG_COMMENTS_URL % blog_id, auth_token=auth_token,
|
||||
desired_class=desired_class, query=query, **kwargs)
|
||||
|
||||
GetBlogComments = get_blog_comments
|
||||
|
||||
def get_blog_archive(self, blog_id, auth_token=None, **kwargs):
|
||||
return self.get_feed(BLOG_ARCHIVE_URL % blog_id, auth_token=auth_token,
|
||||
**kwargs)
|
||||
|
||||
GetBlogArchive = get_blog_archive
|
||||
|
||||
def add_post(self, blog_id, title, body, labels=None, draft=False,
|
||||
auth_token=None, title_type='text', body_type='html', **kwargs):
|
||||
# Construct an atom Entry for the blog post to be sent to the server.
|
||||
new_entry = gdata.blogger.data.BlogPost(
|
||||
title=atom.data.Title(text=title, type=title_type),
|
||||
content=atom.data.Content(text=body, type=body_type))
|
||||
if labels:
|
||||
for label in labels:
|
||||
new_entry.add_label(label)
|
||||
if draft:
|
||||
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
|
||||
return self.post(new_entry, BLOG_POST_URL % blog_id, auth_token=auth_token, **kwargs)
|
||||
|
||||
AddPost = add_post
|
||||
|
||||
def add_page(self, blog_id, title, body, draft=False, auth_token=None,
|
||||
title_type='text', body_type='html', **kwargs):
|
||||
new_entry = gdata.blogger.data.BlogPage(
|
||||
title=atom.data.Title(text=title, type=title_type),
|
||||
content=atom.data.Content(text=body, type=body_type))
|
||||
if draft:
|
||||
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
|
||||
return self.post(new_entry, BLOG_PAGE_URL % blog_id, auth_token=auth_token, **kwargs)
|
||||
|
||||
AddPage = add_page
|
||||
|
||||
def add_comment(self, blog_id, post_id, body, auth_token=None,
|
||||
title_type='text', body_type='html', **kwargs):
|
||||
new_entry = gdata.blogger.data.Comment(
|
||||
content=atom.data.Content(text=body, type=body_type))
|
||||
return self.post(new_entry, BLOG_POST_COMMENTS_URL % (blog_id, post_id),
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
AddComment = add_comment
|
||||
|
||||
def update(self, entry, auth_token=None, **kwargs):
|
||||
# The Blogger API does not currently support ETags, so for now remove
|
||||
# the ETag before performing an update.
|
||||
old_etag = entry.etag
|
||||
entry.etag = None
|
||||
response = gdata.client.GDClient.update(self, entry,
|
||||
auth_token=auth_token, **kwargs)
|
||||
entry.etag = old_etag
|
||||
return response
|
||||
|
||||
Update = update
|
||||
|
||||
def delete(self, entry_or_uri, auth_token=None, **kwargs):
|
||||
if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)):
|
||||
return gdata.client.GDClient.delete(self, entry_or_uri,
|
||||
auth_token=auth_token, **kwargs)
|
||||
# The Blogger API does not currently support ETags, so for now remove
|
||||
# the ETag before performing a delete.
|
||||
old_etag = entry_or_uri.etag
|
||||
entry_or_uri.etag = None
|
||||
response = gdata.client.GDClient.delete(self, entry_or_uri,
|
||||
auth_token=auth_token, **kwargs)
|
||||
# TODO: if GDClient.delete raises an exception, the entry's etag may be
|
||||
# left as None. Should revisit this logic.
|
||||
entry_or_uri.etag = old_etag
|
||||
return response
|
||||
|
||||
Delete = delete
|
||||
|
||||
|
||||
class Query(gdata.client.Query):
|
||||
|
||||
def __init__(self, order_by=None, **kwargs):
|
||||
gdata.client.Query.__init__(self, **kwargs)
|
||||
self.order_by = order_by
|
||||
|
||||
def modify_request(self, http_request):
|
||||
gdata.client._add_query_param('orderby', self.order_by, http_request)
|
||||
gdata.client.Query.modify_request(self, http_request)
|
||||
|
||||
ModifyRequest = modify_request
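# Illustrative usage sketch (added): creating a post with BloggerClient. The
# credentials are placeholders, and client_login is the ClientLogin helper
# assumed to be inherited from gdata.client.GDClient.
def _blogger_client_example(email, password):
  client = BloggerClient()
  client.client_login(email, password, source='exampleCo-exampleApp-1',
                      service='blogger')
  feed = client.get_blogs()
  blog_id = feed.entry[0].get_blog_id()
  return client.add_post(blog_id, 'Hello world',
                         '<p>First post from the API.</p>', draft=True)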
168
python/gdata/blogger/data.py
Normal file
@@ -0,0 +1,168 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Data model classes for parsing and generating XML for the Blogger API."""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
import atom.core
|
||||
import gdata.data
|
||||
|
||||
|
||||
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
|
||||
THR_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s'
|
||||
|
||||
BLOG_NAME_PATTERN = re.compile('(http://)(\w*)')
|
||||
BLOG_ID_PATTERN = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
|
||||
BLOG_ID2_PATTERN = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
|
||||
POST_ID_PATTERN = re.compile(
|
||||
'(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
|
||||
PAGE_ID_PATTERN = re.compile(
|
||||
'(tag:blogger.com,1999:blog-)(\w*)(.page-)(\w*)')
|
||||
COMMENT_ID_PATTERN = re.compile('.*-(\w*)$')
|
||||
|
||||
|
||||
class BloggerEntry(gdata.data.GDEntry):
|
||||
"""Adds convenience methods inherited by all Blogger entries."""
|
||||
|
||||
def get_blog_id(self):
|
||||
"""Extracts the Blogger id of this blog.
|
||||
|
||||
This method is useful when constructing URLs by hand. The blog id is
|
||||
often used in blogger operation URLs. This should not be confused with
|
||||
the id member of a BloggerBlog. The id element is the Atom id XML element.
|
||||
The blog id which this method returns is a part of the Atom id.
|
||||
|
||||
Returns:
|
||||
The blog's unique id as a string.
|
||||
"""
|
||||
if self.id.text:
|
||||
match = BLOG_ID_PATTERN.match(self.id.text)
|
||||
if match:
|
||||
return match.group(2)
|
||||
else:
|
||||
return BLOG_ID2_PATTERN.match(self.id.text).group(2)
|
||||
return None
|
||||
|
||||
GetBlogId = get_blog_id
|
||||
|
||||
def get_blog_name(self):
|
||||
"""Finds the name of this blog as used in the 'alternate' URL.
|
||||
|
||||
An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
|
||||
entry representing the above example, this method would return 'blogName'.
|
||||
|
||||
Returns:
|
||||
The blog's URL name component as a string.
|
||||
"""
|
||||
for link in self.link:
|
||||
if link.rel == 'alternate':
|
||||
return urlparse.urlparse(link.href)[1].split(".", 1)[0]
|
||||
return None
|
||||
|
||||
GetBlogName = get_blog_name
|
||||
|
||||
|
||||
class Blog(BloggerEntry):
|
||||
"""Represents a blog which belongs to the user."""
|
||||
|
||||
|
||||
class BlogFeed(gdata.data.GDFeed):
|
||||
entry = [Blog]
|
||||
|
||||
|
||||
class BlogPost(BloggerEntry):
|
||||
"""Represents a single post on a blog."""
|
||||
|
||||
def add_label(self, label):
|
||||
"""Adds a label to the blog post.
|
||||
|
||||
The label is represented by an Atom category element, so this method
|
||||
is shorthand for appending a new atom.Category object.
|
||||
|
||||
Args:
|
||||
label: str
|
||||
"""
|
||||
self.category.append(atom.data.Category(scheme=LABEL_SCHEME, term=label))
|
||||
|
||||
AddLabel = add_label
|
||||
|
||||
def get_post_id(self):
|
||||
"""Extracts the postID string from the entry's Atom id.
|
||||
|
||||
Returns: A string of digits which identify this post within the blog.
|
||||
"""
|
||||
if self.id.text:
|
||||
return POST_ID_PATTERN.match(self.id.text).group(4)
|
||||
return None
|
||||
|
||||
GetPostId = get_post_id
|
||||
|
||||
|
||||
class BlogPostFeed(gdata.data.GDFeed):
|
||||
entry = [BlogPost]
|
||||
|
||||
|
||||
class BlogPage(BloggerEntry):
|
||||
"""Represents a single page on a blog."""
|
||||
|
||||
def get_page_id(self):
|
||||
"""Extracts the pageID string from entry's Atom id.
|
||||
|
||||
Returns: A string of digits which identify this page within the blog.
|
||||
"""
|
||||
if self.id.text:
|
||||
return PAGE_ID_PATTERN.match(self.id.text).group(4)
|
||||
return None
|
||||
|
||||
GetPageId = get_page_id
|
||||
|
||||
|
||||
class BlogPageFeed(gdata.data.GDFeed):
|
||||
entry = [BlogPage]
|
||||
|
||||
|
||||
class InReplyTo(atom.core.XmlElement):
|
||||
_qname = THR_TEMPLATE % 'in-reply-to'
|
||||
href = 'href'
|
||||
ref = 'ref'
|
||||
source = 'source'
|
||||
type = 'type'
|
||||
|
||||
|
||||
class Comment(BloggerEntry):
|
||||
"""Blog post comment entry in a feed listing comments on a post or blog."""
|
||||
in_reply_to = InReplyTo
|
||||
|
||||
def get_comment_id(self):
|
||||
"""Extracts the commentID string from the entry's Atom id.
|
||||
|
||||
Returns: A string of digits which identifies this comment within the blog.
|
||||
"""
|
||||
if self.id.text:
|
||||
return COMMENT_ID_PATTERN.match(self.id.text).group(1)
|
||||
return None
|
||||
|
||||
GetCommentId = get_comment_id
|
||||
|
||||
|
||||
class CommentFeed(gdata.data.GDFeed):
|
||||
entry = [Comment]
|
||||
142
python/gdata/blogger/service.py
Normal file
@@ -0,0 +1,142 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2007 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Classes to interact with the Blogger server."""
|
||||
|
||||
__author__ = 'api.jscudder (Jeffrey Scudder)'
|
||||
|
||||
import gdata.service
|
||||
import gdata.blogger
|
||||
|
||||
|
||||
class BloggerService(gdata.service.GDataService):
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server='www.blogger.com', **kwargs):
|
||||
"""Creates a client for the Blogger service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'www.blogger.com'.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service='blogger', source=source,
|
||||
server=server, **kwargs)
|
||||
|
||||
def GetBlogFeed(self, uri=None):
|
||||
"""Retrieve a list of the blogs to which the current user may manage."""
|
||||
if not uri:
|
||||
uri = '/feeds/default/blogs'
|
||||
return self.Get(uri, converter=gdata.blogger.BlogFeedFromString)
|
||||
|
||||
def GetBlogCommentFeed(self, blog_id=None, uri=None):
|
||||
"""Retrieve a list of the comments for this blog."""
|
||||
if blog_id:
|
||||
uri = '/feeds/%s/comments/default' % blog_id
|
||||
return self.Get(uri, converter=gdata.blogger.CommentFeedFromString)
|
||||
|
||||
def GetBlogPostFeed(self, blog_id=None, uri=None):
|
||||
if blog_id:
|
||||
uri = '/feeds/%s/posts/default' % blog_id
|
||||
return self.Get(uri, converter=gdata.blogger.BlogPostFeedFromString)
|
||||
|
||||
def GetPostCommentFeed(self, blog_id=None, post_id=None, uri=None):
|
||||
"""Retrieve a list of the comments for this particular blog post."""
|
||||
if blog_id and post_id:
|
||||
uri = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
|
||||
return self.Get(uri, converter=gdata.blogger.CommentFeedFromString)
|
||||
|
||||
def AddPost(self, entry, blog_id=None, uri=None):
|
||||
if blog_id:
|
||||
uri = '/feeds/%s/posts/default' % blog_id
|
||||
return self.Post(entry, uri,
|
||||
converter=gdata.blogger.BlogPostEntryFromString)
|
||||
|
||||
def UpdatePost(self, entry, uri=None):
|
||||
if not uri:
|
||||
uri = entry.GetEditLink().href
|
||||
return self.Put(entry, uri,
|
||||
converter=gdata.blogger.BlogPostEntryFromString)
|
||||
|
||||
def DeletePost(self, entry=None, uri=None):
|
||||
if not uri:
|
||||
uri = entry.GetEditLink().href
|
||||
return self.Delete(uri)
|
||||
|
||||
def AddComment(self, comment_entry, blog_id=None, post_id=None, uri=None):
|
||||
"""Adds a new comment to the specified blog post."""
|
||||
if blog_id and post_id:
|
||||
uri = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
|
||||
return self.Post(comment_entry, uri,
|
||||
converter=gdata.blogger.CommentEntryFromString)
|
||||
|
||||
def DeleteComment(self, entry=None, uri=None):
|
||||
if not uri:
|
||||
uri = entry.GetEditLink().href
|
||||
return self.Delete(uri)
|
||||
|
||||
|
||||
class BlogQuery(gdata.service.Query):
|
||||
|
||||
def __init__(self, feed=None, params=None, categories=None, blog_id=None):
|
||||
"""Constructs a query object for the list of a user's Blogger blogs.
|
||||
|
||||
Args:
|
||||
feed: str (optional) The beginning of the URL to be queried. If the
|
||||
feed is not set, and there is no blog_id passed in, the default
|
||||
value is used ('/feeds/default/blogs').
|
||||
params: dict (optional)
|
||||
categories: list (optional)
|
||||
blog_id: str (optional)
|
||||
"""
|
||||
if not feed and blog_id:
|
||||
feed = '/feeds/default/blogs/%s' % blog_id
|
||||
elif not feed:
|
||||
feed = '/feeds/default/blogs'
|
||||
gdata.service.Query.__init__(self, feed=feed, params=params,
|
||||
categories=categories)
|
||||
|
||||
|
||||
class BlogPostQuery(gdata.service.Query):
|
||||
|
||||
def __init__(self, feed=None, params=None, categories=None, blog_id=None,
|
||||
post_id=None):
|
||||
if not feed and blog_id and post_id:
|
||||
feed = '/feeds/%s/posts/default/%s' % (blog_id, post_id)
|
||||
elif not feed and blog_id:
|
||||
feed = '/feeds/%s/posts/default' % blog_id
|
||||
gdata.service.Query.__init__(self, feed=feed, params=params,
|
||||
categories=categories)
|
||||
|
||||
|
||||
class BlogCommentQuery(gdata.service.Query):
|
||||
|
||||
def __init__(self, feed=None, params=None, categories=None, blog_id=None,
|
||||
post_id=None, comment_id=None):
|
||||
if not feed and blog_id and comment_id:
|
||||
feed = '/feeds/%s/comments/default/%s' % (blog_id, comment_id)
|
||||
elif not feed and blog_id and post_id:
|
||||
feed = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
|
||||
elif not feed and blog_id:
|
||||
feed = '/feeds/%s/comments/default' % blog_id
|
||||
gdata.service.Query.__init__(self, feed=feed, params=params,
|
||||
categories=categories)
|
||||
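A hedged end-to-end sketch of the service above. The account, blog id and post content are placeholders, and ClientLogin plus the BlogPostEntry, atom.Title and atom.Content classes come from the wider library rather than this diff, so treat them as assumptions.

import atom
import gdata.blogger
import gdata.blogger.service

client = gdata.blogger.service.BloggerService(source='ExampleCo-ExampleApp-1.0')
client.ClientLogin('user@example.com', 'password')  # placeholders

# List the blogs this account can manage.
blogs = client.GetBlogFeed()
for blog in blogs.entry:
    print(blog.title.text)

# Create and publish a post (the blog id is a placeholder).
entry = gdata.blogger.BlogPostEntry()
entry.title = atom.Title(text='Hello from the API')
entry.content = atom.Content(content_type='html', text='<p>First post.</p>')
posted = client.AddPost(entry, blog_id='1234567890')

# Queries build the feed path for you, e.g. the posts of one blog:
query = gdata.blogger.service.BlogPostQuery(blog_id='1234567890')
query['max-results'] = '5'
posts = client.Get(query.ToUri(), converter=gdata.blogger.BlogPostFeedFromString)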
473
python/gdata/books/__init__.py
Normal file
@@ -0,0 +1,473 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
"""
|
||||
Data Models for books.service
|
||||
|
||||
All classes can be instantiated from an xml string using their FromString
|
||||
class method.
|
||||
|
||||
Notes:
|
||||
* Book.title displays the first dc:title because the returned XML
|
||||
repeats that datum as atom:title.
|
||||
* There is an undocumented gbs:openAccess element that is not parsed.
|
||||
"""
|
||||
|
||||
__author__ = "James Sams <sams.james@gmail.com>"
|
||||
__copyright__ = "Apache License v2.0"
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
BOOK_SEARCH_NAMESPACE = 'http://schemas.google.com/books/2008'
|
||||
DC_NAMESPACE = 'http://purl.org/dc/terms'
|
||||
ANNOTATION_REL = "http://schemas.google.com/books/2008/annotation"
|
||||
INFO_REL = "http://schemas.google.com/books/2008/info"
|
||||
LABEL_SCHEME = "http://schemas.google.com/books/2008/labels"
|
||||
PREVIEW_REL = "http://schemas.google.com/books/2008/preview"
|
||||
THUMBNAIL_REL = "http://schemas.google.com/books/2008/thumbnail"
|
||||
FULL_VIEW = "http://schemas.google.com/books/2008#view_all_pages"
|
||||
PARTIAL_VIEW = "http://schemas.google.com/books/2008#view_partial"
|
||||
NO_VIEW = "http://schemas.google.com/books/2008#view_no_pages"
|
||||
UNKNOWN_VIEW = "http://schemas.google.com/books/2008#view_unknown"
|
||||
EMBEDDABLE = "http://schemas.google.com/books/2008#embeddable"
|
||||
NOT_EMBEDDABLE = "http://schemas.google.com/books/2008#not_embeddable"
|
||||
|
||||
|
||||
|
||||
class _AtomFromString(atom.AtomBase):
|
||||
|
||||
#@classmethod
|
||||
def FromString(cls, s):
|
||||
return atom.CreateClassFromXMLString(cls, s)
|
||||
|
||||
FromString = classmethod(FromString)
|
||||
|
||||
|
||||
class Creator(_AtomFromString):
|
||||
"""
|
||||
The <dc:creator> element identifies an author, or more generally, an entity
|
||||
responsible for creating the volume in question. Examples of a creator
|
||||
include a person, an organization, or a service. In the case of
|
||||
anthologies, proceedings, or other edited works, this field may be used to
|
||||
indicate editors or other entities responsible for collecting the volume's
|
||||
contents.
|
||||
|
||||
This element appears as a child of <entry>. If there are multiple authors or
|
||||
contributors to the book, there may be multiple <dc:creator> elements in the
|
||||
volume entry (one for each creator or contributor).
|
||||
"""
|
||||
|
||||
_tag = 'creator'
|
||||
_namespace = DC_NAMESPACE
|
||||
|
||||
|
||||
class Date(_AtomFromString): #iso 8601 / W3CDTF profile
|
||||
"""
|
||||
The <dc:date> element indicates the publication date of the specific volume
|
||||
in question. If the book is a reprint, this is the reprint date, not the
|
||||
original publication date. The date is encoded according to the ISO-8601
|
||||
standard (and more specifically, the W3CDTF profile).
|
||||
|
||||
The <dc:date> element can appear only as a child of <entry>.
|
||||
|
||||
Usually only the year or the year and the month are given.
|
||||
|
||||
YYYY-MM-DDThh:mm:ssTZD TZD = -hh:mm or +hh:mm
|
||||
"""
|
||||
|
||||
_tag = 'date'
|
||||
_namespace = DC_NAMESPACE
|
||||
|
||||
|
||||
class Description(_AtomFromString):
|
||||
"""
|
||||
The <dc:description> element includes text that describes a book or book
|
||||
result. In a search result feed, this may be a search result "snippet" that
|
||||
contains the words around the user's search term. For a single volume feed,
|
||||
this element may contain a synopsis of the book.
|
||||
|
||||
The <dc:description> element can appear only as a child of <entry>
|
||||
"""
|
||||
|
||||
_tag = 'description'
|
||||
_namespace = DC_NAMESPACE
|
||||
|
||||
|
||||
class Format(_AtomFromString):
|
||||
"""
|
||||
The <dc:format> element describes the physical properties of the volume.
|
||||
Currently, it indicates the number of pages in the book, but more
|
||||
information may be added to this field in the future.
|
||||
|
||||
This element can appear only as a child of <entry>.
|
||||
"""
|
||||
|
||||
_tag = 'format'
|
||||
_namespace = DC_NAMESPACE
|
||||
|
||||
|
||||
class Identifier(_AtomFromString):
|
||||
"""
|
||||
The <dc:identifier> element provides an unambiguous reference to a
|
||||
particular book.
|
||||
* Every <entry> contains at least one <dc:identifier> child.
|
||||
* The first identifier is always the unique string Book Search has assigned
|
||||
to the volume (such as s1gVAAAAYAAJ). This is the ID that appears in the
|
||||
book's URL in the Book Search GUI, as well as in the URL of that book's
|
||||
single item feed.
|
||||
* Many books contain additional <dc:identifier> elements. These provide
|
||||
alternate, external identifiers to the volume. Such identifiers may
|
||||
include the ISBNs, ISSNs, Library of Congress Control Numbers (LCCNs),
|
||||
and OCLC numbers; they are prepended with a corresponding namespace
|
||||
prefix (such as "ISBN:").
|
||||
* Any <dc:identifier> can be passed to the Dynamic Links API, used to
|
||||
instantiate an Embedded Viewer, or even used to construct static links to
|
||||
Book Search.
|
||||
The <dc:identifier> element can appear only as a child of <entry>.
|
||||
"""
|
||||
|
||||
_tag = 'identifier'
|
||||
_namespace = DC_NAMESPACE
|
||||
|
||||
|
||||
class Publisher(_AtomFromString):
|
||||
"""
|
||||
The <dc:publisher> element contains the name of the entity responsible for
|
||||
producing and distributing the volume (usually the specific edition of this
|
||||
book). Examples of a publisher include a person, an organization, or a
|
||||
service.
|
||||
|
||||
This element can appear only as a child of <entry>. If there is more than
|
||||
one publisher, multiple <dc:publisher> elements may appear.
|
||||
"""
|
||||
|
||||
_tag = 'publisher'
|
||||
_namespace = DC_NAMESPACE
|
||||
|
||||
|
||||
class Subject(_AtomFromString):
|
||||
"""
|
||||
The <dc:subject> element identifies the topic of the book. Usually this is
|
||||
a Library of Congress Subject Heading (LCSH) or Book Industry Standards
|
||||
and Communications Subject Heading (BISAC).
|
||||
|
||||
The <dc:subject> element can appear only as a child of <entry>. There may
|
||||
be multiple <dc:subject> elements per entry.
|
||||
"""
|
||||
|
||||
_tag = 'subject'
|
||||
_namespace = DC_NAMESPACE
|
||||
|
||||
|
||||
class Title(_AtomFromString):
|
||||
"""
|
||||
The <dc:title> element contains the title of a book as it was published. If
|
||||
a book has a subtitle, it appears as a second <dc:title> element in the book
|
||||
result's <entry>.
|
||||
"""
|
||||
|
||||
_tag = 'title'
|
||||
_namespace = DC_NAMESPACE
|
||||
|
||||
|
||||
class Viewability(_AtomFromString):
|
||||
"""
|
||||
Google Book Search respects the user's local copyright restrictions. As a
|
||||
result, previews or full views of some books are not available in all
|
||||
locations. The <gbs:viewability> element indicates whether a book is fully
|
||||
viewable, can be previewed, or only has "about the book" information. These
|
||||
three "viewability modes" are the same ones returned by the Dynamic Links
|
||||
API.
|
||||
|
||||
The <gbs:viewability> element can appear only as a child of <entry>.
|
||||
|
||||
The value attribute will take the form of the following URIs to represent
|
||||
the relevant viewing capability:
|
||||
|
||||
Full View: http://schemas.google.com/books/2008#view_all_pages
|
||||
Limited Preview: http://schemas.google.com/books/2008#view_partial
|
||||
Snippet View/No Preview: http://schemas.google.com/books/2008#view_no_pages
|
||||
Unknown view: http://schemas.google.com/books/2008#view_unknown
|
||||
"""
|
||||
|
||||
_tag = 'viewability'
|
||||
_namespace = BOOK_SEARCH_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, value=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
self.value = value
|
||||
_AtomFromString.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
|
||||
class Embeddability(_AtomFromString):
|
||||
"""
|
||||
Many of the books found on Google Book Search can be embedded on third-party
|
||||
sites using the Embedded Viewer. The <gbs:embeddability> element indicates
|
||||
whether a particular book result is available for embedding. By definition,
|
||||
a book that cannot be previewed on Book Search cannot be embedded on third-
|
||||
party sites.
|
||||
|
||||
The <gbs:embeddability> element can appear only as a child of <entry>.
|
||||
|
||||
The value attribute will take on one of the following URIs:
|
||||
embeddable: http://schemas.google.com/books/2008#embeddable
|
||||
not embeddable: http://schemas.google.com/books/2008#not_embeddable
|
||||
"""
|
||||
|
||||
_tag = 'embeddability'
|
||||
_namespace = BOOK_SEARCH_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, value=None, text=None, extension_elements=None,
|
||||
extension_attributes=None):
|
||||
self.value = value
|
||||
_AtomFromString.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
|
||||
class Review(_AtomFromString):
|
||||
"""
|
||||
When present, the <gbs:review> element contains a user-generated review for
|
||||
a given book. This element currently appears only in the user library and
|
||||
user annotation feeds, as a child of <entry>.
|
||||
|
||||
type: text, html, xhtml
|
||||
xml:lang: id of the language, a guess, (always two letters?)
|
||||
"""
|
||||
|
||||
_tag = 'review'
|
||||
_namespace = BOOK_SEARCH_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['type'] = 'type'
|
||||
_attributes['{http://www.w3.org/XML/1998/namespace}lang'] = 'lang'
|
||||
|
||||
def __init__(self, type=None, lang=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
self.type = type
|
||||
self.lang = lang
|
||||
_AtomFromString.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
|
||||
class Rating(_AtomFromString):
|
||||
"""All attributes must take an integral string between 1 and 5.
|
||||
The min, max, and average attributes represent 'community' ratings. The
|
||||
value attribute is the user's (of the feed from which the item is fetched,
|
||||
not necessarily the authenticated user) rating of the book.
|
||||
"""
|
||||
|
||||
_tag = 'rating'
|
||||
_namespace = gdata.GDATA_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['min'] = 'min'
|
||||
_attributes['max'] = 'max'
|
||||
_attributes['average'] = 'average'
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, min=None, max=None, average=None, value=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
self.min = min
|
||||
self.max = max
|
||||
self.average = average
|
||||
self.value = value
|
||||
_AtomFromString.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
|
||||
class Book(_AtomFromString, gdata.GDataEntry):
|
||||
"""
|
||||
Represents an <entry> from either a search, annotation, library, or single
|
||||
item feed. Note that dc_title attribute is the proper title of the volume,
|
||||
title is an atom element and may not represent the full title.
|
||||
"""
|
||||
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
for i in (Creator, Identifier, Publisher, Subject,):
|
||||
_children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, [i])
|
||||
for i in (Date, Description, Format, Viewability, Embeddability,
|
||||
Review, Rating): # Review, Rating maybe only in anno/lib entries
|
||||
_children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, i)
|
||||
# there is an atom title as well, should we clobber that?
|
||||
del(i)
|
||||
_children['{%s}%s' % (Title._namespace, Title._tag)] = ('dc_title', [Title])
|
||||
|
||||
def to_dict(self):
|
||||
"""Returns a dictionary of the book's available metadata. If the data
|
||||
cannot be discovered, it is not included as a key in the returned dict.
|
||||
The possible keys are: authors, embeddability, date, description,
|
||||
format, identifiers, publishers, rating, review, subjects, title, and
|
||||
viewability.
|
||||
|
||||
Notes:
|
||||
* Plural keys will be lists
|
||||
* Singular keys will be strings
|
||||
* Title, despite usually being a list, joins the title and subtitle
|
||||
with a space as a single string.
|
||||
* embeddability and viewability only return the portion of the URI
|
||||
after #
|
||||
* identifiers is a list of tuples, where the first item of each tuple
|
||||
is the type of identifier and the second item is the identifying
|
||||
string. Note that while doing dict() on this list may be possible,
|
||||
some items may have multiple of the same identifier and converting
|
||||
to a dict may result in collisions/dropped data.
|
||||
* Rating returns only the user's rating. See Rating class for precise
|
||||
definition.
|
||||
"""
|
||||
d = {}
|
||||
if self.GetAnnotationLink():
|
||||
d['annotation'] = self.GetAnnotationLink().href
|
||||
if self.creator:
|
||||
d['authors'] = [x.text for x in self.creator]
|
||||
if self.embeddability:
|
||||
d['embeddability'] = self.embeddability.value.split('#')[-1]
|
||||
if self.date:
|
||||
d['date'] = self.date.text
|
||||
if self.description:
|
||||
d['description'] = self.description.text
|
||||
if self.format:
|
||||
d['format'] = self.format.text
|
||||
if self.identifier:
|
||||
d['identifiers'] = [('google_id', self.identifier[0].text)]
|
||||
for x in self.identifier[1:]:
|
||||
l = x.text.split(':') # should we lower the case of the ids?
|
||||
d['identifiers'].append((l[0], ':'.join(l[1:])))
|
||||
if self.GetInfoLink():
|
||||
d['info'] = self.GetInfoLink().href
|
||||
if self.GetPreviewLink():
|
||||
d['preview'] = self.GetPreviewLink().href
|
||||
if self.publisher:
|
||||
d['publishers'] = [x.text for x in self.publisher]
|
||||
if self.rating:
|
||||
d['rating'] = self.rating.value
|
||||
if self.review:
|
||||
d['review'] = self.review.text
|
||||
if self.subject:
|
||||
d['subjects'] = [x.text for x in self.subject]
|
||||
if self.GetThumbnailLink():
|
||||
d['thumbnail'] = self.GetThumbnailLink().href
|
||||
if self.dc_title:
|
||||
d['title'] = ' '.join([x.text for x in self.dc_title])
|
||||
if self.viewability:
|
||||
d['viewability'] = self.viewability.value.split('#')[-1]
|
||||
return d
|
||||
|
||||
def __init__(self, creator=None, date=None,
|
||||
description=None, format=None, author=None, identifier=None,
|
||||
publisher=None, subject=None, dc_title=None, viewability=None,
|
||||
embeddability=None, review=None, rating=None, category=None,
|
||||
content=None, contributor=None, atom_id=None, link=None,
|
||||
published=None, rights=None, source=None, summary=None,
|
||||
title=None, control=None, updated=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
self.creator = creator
|
||||
self.date = date
|
||||
self.description = description
|
||||
self.format = format
|
||||
self.identifier = identifier
|
||||
self.publisher = publisher
|
||||
self.subject = subject
|
||||
self.dc_title = dc_title or []
|
||||
self.viewability = viewability
|
||||
self.embeddability = embeddability
|
||||
self.review = review
|
||||
self.rating = rating
|
||||
gdata.GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content, contributor=contributor, atom_id=atom_id,
|
||||
link=link, published=published, rights=rights, source=source,
|
||||
summary=summary, title=title, control=control, updated=updated,
|
||||
text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
|
||||
def GetThumbnailLink(self):
|
||||
"""Returns the atom.Link object representing the thumbnail URI."""
|
||||
for i in self.link:
|
||||
if i.rel == THUMBNAIL_REL:
|
||||
return i
|
||||
|
||||
def GetInfoLink(self):
|
||||
"""
|
||||
Returns the atom.Link object representing the human-readable info URI.
|
||||
"""
|
||||
for i in self.link:
|
||||
if i.rel == INFO_REL:
|
||||
return i
|
||||
|
||||
def GetPreviewLink(self):
|
||||
"""Returns the atom.Link object representing the preview URI."""
|
||||
for i in self.link:
|
||||
if i.rel == PREVIEW_REL:
|
||||
return i
|
||||
|
||||
def GetAnnotationLink(self):
|
||||
"""
|
||||
Returns the atom.Link object representing the Annotation URI.
|
||||
Note that the use of www.books in the href of this link seems to make
|
||||
this information useless. Using books.service.ANNOTATION_FEED and
|
||||
BOOK_SERVER to construct your URI seems to work better.
|
||||
"""
|
||||
for i in self.link:
|
||||
if i.rel == ANNOTATION_REL:
|
||||
return i
|
||||
|
||||
def set_rating(self, value):
|
||||
"""Set user's rating. Must be an integral string between 1 nad 5"""
|
||||
assert (value in ('1','2','3','4','5'))
|
||||
if not isinstance(self.rating, Rating):
|
||||
self.rating = Rating()
|
||||
self.rating.value = value
|
||||
|
||||
def set_review(self, text, type='text', lang='en'):
|
||||
"""Set user's review text"""
|
||||
self.review = Review(text=text, type=type, lang=lang)
|
||||
|
||||
def get_label(self):
|
||||
"""Get users label for the item as a string"""
|
||||
for i in self.category:
|
||||
if i.scheme == LABEL_SCHEME:
|
||||
return i.term
|
||||
|
||||
def set_label(self, term):
|
||||
"""Clear pre-existing label for the item and set term as the label."""
|
||||
self.remove_label()
|
||||
self.category.append(atom.Category(term=term, scheme=LABEL_SCHEME))
|
||||
|
||||
def remove_label(self):
|
||||
"""Clear the user's label for the item"""
|
||||
ln = len(self.category)
|
||||
for i, j in enumerate(self.category[::-1]):
|
||||
if j.scheme == LABEL_SCHEME:
|
||||
del(self.category[ln-1-i])
|
||||
|
||||
def clean_annotations(self):
|
||||
"""Clear all annotations from an item. Useful for taking an item from
|
||||
another user's library/annotation feed and adding it to the
|
||||
authenticated user's library without adopting annotations."""
|
||||
self.remove_label()
|
||||
self.review = None
|
||||
self.rating = None
|
||||
|
||||
|
||||
def get_google_id(self):
|
||||
"""Get Google's ID of the item."""
|
||||
return self.id.text.split('/')[-1]
|
||||
|
||||
|
||||
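A sketch of reading the parsed metadata back out of a Book. It leans on the companion gdata.books.service module added later in this change, and the volume id is the sample one quoted in the Identifier docstring above; no authentication is needed for a public volume.

import gdata.books.service

service = gdata.books.service.BookService(source='ExampleCo-ExampleApp-1.0')
book = service.get_by_google_id('s1gVAAAAYAAJ')  # sample id from the docs above

info = book.to_dict()
print(info.get('title'))         # dc:title values joined with a space
print(info.get('viewability'))   # e.g. 'view_partial' (fragment after '#')
for id_type, id_value in info.get('identifiers', []):
    # first tuple is ('google_id', ...); later ones are ISBN/LCCN/OCLC etc.
    print('%s: %s' % (id_type, id_value))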
class BookFeed(_AtomFromString, gdata.GDataFeed):
|
||||
"""Represents a feed of entries from a search."""
|
||||
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_children['{%s}%s' % (Book._namespace, Book._tag)] = (Book._tag, [Book])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import doctest
|
||||
doctest.testfile('datamodels.txt')
|
||||
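A minimal sketch of the annotation helpers defined above; the values are made up, and per the caveats in the service module that follows, edits to these annotations do not always stick on the server.

import gdata.books

book = gdata.books.Book()
book.set_rating('4')                       # must be an integral string '1'..'5'
book.set_review('Worth reading.', type='text', lang='en')
book.set_label('to-read')                  # replaces any existing label category
print(book.get_label())                    # 'to-read'
book.clean_annotations()                   # strip label, review and rating again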
90
python/gdata/books/data.py
Normal file
@@ -0,0 +1,90 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains the data classes of the Google Book Search Data API"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
import atom.data
|
||||
import gdata.data
|
||||
import gdata.dublincore.data
|
||||
import gdata.opensearch.data
|
||||
|
||||
|
||||
GBS_TEMPLATE = '{http://schemas.google.com/books/2008/}%s'
|
||||
|
||||
|
||||
class CollectionEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry in a feed of collections."""
|
||||
|
||||
|
||||
class CollectionFeed(gdata.data.BatchFeed):
|
||||
"""Describes a Book Search collection feed."""
|
||||
entry = [CollectionEntry]
|
||||
|
||||
|
||||
class Embeddability(atom.core.XmlElement):
|
||||
"""Describes an embeddability."""
|
||||
_qname = GBS_TEMPLATE % 'embeddability'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class OpenAccess(atom.core.XmlElement):
|
||||
"""Describes an open access."""
|
||||
_qname = GBS_TEMPLATE % 'openAccess'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class Review(atom.core.XmlElement):
|
||||
"""User-provided review."""
|
||||
_qname = GBS_TEMPLATE % 'review'
|
||||
lang = 'lang'
|
||||
type = 'type'
|
||||
|
||||
|
||||
class Viewability(atom.core.XmlElement):
|
||||
"""Describes a viewability."""
|
||||
_qname = GBS_TEMPLATE % 'viewability'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class VolumeEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry in a feed of Book Search volumes."""
|
||||
comments = gdata.data.Comments
|
||||
language = [gdata.dublincore.data.Language]
|
||||
open_access = OpenAccess
|
||||
format = [gdata.dublincore.data.Format]
|
||||
dc_title = [gdata.dublincore.data.Title]
|
||||
viewability = Viewability
|
||||
embeddability = Embeddability
|
||||
creator = [gdata.dublincore.data.Creator]
|
||||
rating = gdata.data.Rating
|
||||
description = [gdata.dublincore.data.Description]
|
||||
publisher = [gdata.dublincore.data.Publisher]
|
||||
date = [gdata.dublincore.data.Date]
|
||||
subject = [gdata.dublincore.data.Subject]
|
||||
identifier = [gdata.dublincore.data.Identifier]
|
||||
review = Review
|
||||
|
||||
|
||||
class VolumeFeed(gdata.data.BatchFeed):
|
||||
"""Describes a Book Search volume feed."""
|
||||
entry = [VolumeEntry]
|
||||
|
||||
|
||||
266
python/gdata/books/service.py
Normal file
@@ -0,0 +1,266 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
"""
|
||||
Extend gdata.service.GDataService to support authenticated CRUD ops on
|
||||
Books API
|
||||
|
||||
http://code.google.com/apis/books/docs/getting-started.html
|
||||
http://code.google.com/apis/books/docs/gdata/developers_guide_protocol.html
|
||||
|
||||
TODO: (here and __init__)
|
||||
* search based on label, review, or other annotations (possible?)
|
||||
* edits (specifically, Put requests) seem to fail to effect a change
|
||||
|
||||
Problems With API:
|
||||
* Adding a book with a review to the library adds a note, not a review.
|
||||
This does not get included in the returned item. You see this by
|
||||
looking at My Library through the website.
|
||||
* Editing a review never edits a review (unless it is freshly added, but
|
||||
see above). More generally,
|
||||
* a Put request with changed annotations (label/rating/review) does NOT
|
||||
change the data. Note: Put requests only work on the href from
|
||||
GetEditLink (as per the spec). Do not try to PUT to the annotate or
|
||||
library feeds, this will cause a 400 Invalid URI Bad Request response.
|
||||
Attempting to Post to one of the feeds with the updated annotations
|
||||
does not update them. See the following for (hopefully) a follow up:
|
||||
google.com/support/forum/p/booksearch-apis/thread?tid=27fd7f68de438fc8
|
||||
* Attempts to workaround the edit problem continue to fail. For example,
|
||||
removing the item, editing the data, and re-adding the item gives us only
|
||||
our originally added data (annotations). This occurs even if we
|
||||
completely shut python down, refetch the book from the public feed,
|
||||
and re-add it. There is some kind of persistence going on that I
|
||||
cannot change. This is likely due to the annotations being cached in
|
||||
the annotation feed and the inability to edit (see Put, above)
|
||||
* GetAnnotationLink has www.books.... as the server, but hitting www...
|
||||
results in a bad URI error.
|
||||
* Spec indicates there may be multiple labels, but there does not seem
|
||||
to be a way to get the server to accept multiple labels, nor does the
|
||||
web interface have an obvious way to have multiple labels. Multiple
|
||||
labels are never returned.
|
||||
"""
|
||||
|
||||
__author__ = "James Sams <sams.james@gmail.com>"
|
||||
__copyright__ = "Apache License v2.0"
|
||||
|
||||
from shlex import split
|
||||
|
||||
import gdata.service
|
||||
try:
|
||||
import books
|
||||
except ImportError:
|
||||
import gdata.books as books
|
||||
|
||||
|
||||
BOOK_SERVER = "books.google.com"
|
||||
GENERAL_FEED = "/books/feeds/volumes"
|
||||
ITEM_FEED = "/books/feeds/volumes/"
|
||||
LIBRARY_FEED = "/books/feeds/users/%s/collections/library/volumes"
|
||||
ANNOTATION_FEED = "/books/feeds/users/%s/volumes"
|
||||
PARTNER_FEED = "/books/feeds/p/%s/volumes"
|
||||
BOOK_SERVICE = "print"
|
||||
ACCOUNT_TYPE = "HOSTED_OR_GOOGLE"
|
||||
|
||||
|
||||
class BookService(gdata.service.GDataService):
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server=BOOK_SERVER, account_type=ACCOUNT_TYPE,
|
||||
exception_handlers=tuple(), **kwargs):
|
||||
"""source should be of form 'ProgramCompany - ProgramName - Version'"""
|
||||
|
||||
gdata.service.GDataService.__init__(self, email=email,
|
||||
password=password, service=BOOK_SERVICE, source=source,
|
||||
server=server, **kwargs)
|
||||
self.exception_handlers = exception_handlers
|
||||
|
||||
def search(self, q, start_index="1", max_results="10",
|
||||
min_viewability="none", feed=GENERAL_FEED,
|
||||
converter=books.BookFeed.FromString):
|
||||
"""
|
||||
Query the Public search feed. q is either a search string or a
|
||||
gdata.service.Query instance with a query set.
|
||||
|
||||
min_viewability must be "none", "partial", or "full".
|
||||
|
||||
If you change the feed to a single item feed, note that you will
|
||||
probably need to change the converter to be Book.FromString
|
||||
"""
|
||||
|
||||
if not isinstance(q, gdata.service.Query):
|
||||
q = gdata.service.Query(text_query=q)
|
||||
if feed:
|
||||
q.feed = feed
|
||||
q['start-index'] = start_index
|
||||
q['max-results'] = max_results
|
||||
q['min-viewability'] = min_viewability
|
||||
return self.Get(uri=q.ToUri(),converter=converter)
|
||||
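A sketch of a plain full-text search against the public volumes feed, which needs no login; the source string and query below are placeholders.

import gdata.books.service

service = gdata.books.service.BookService(source='ExampleCo-ExampleApp-1.0')
feed = service.search('whale hunting', max_results='20',
                      min_viewability='partial')
for entry in feed.entry:
    print(entry.to_dict().get('title'))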
|
||||
def search_by_keyword(self, q='', feed=GENERAL_FEED, start_index="1",
|
||||
max_results="10", min_viewability="none", **kwargs):
|
||||
"""
|
||||
Query the Public Search Feed by keyword. Non-keyword strings can be
|
||||
set in q. This is quite fragile. Is there a function somewhere in
|
||||
the Google library that will parse a query the same way that Google
|
||||
does?
|
||||
|
||||
Legal Identifiers are listed below and correspond to their meaning
|
||||
at http://books.google.com/advanced_book_search:
|
||||
all_words
|
||||
exact_phrase
|
||||
at_least_one
|
||||
without_words
|
||||
title
|
||||
author
|
||||
publisher
|
||||
subject
|
||||
isbn
|
||||
lccn
|
||||
oclc
|
||||
seemingly unsupported:
|
||||
publication_date: a sequence of two two-tuples:
|
||||
((min_month,min_year),(max_month,max_year))
|
||||
where month is one/two digit month, year is 4 digit, eg:
|
||||
(('1','2000'),('10','2003')). Lower bound is inclusive,
|
||||
upper bound is exclusive
|
||||
"""
|
||||
|
||||
for k, v in kwargs.items():
|
||||
if not v:
|
||||
continue
|
||||
k = k.lower()
|
||||
if k == 'all_words':
|
||||
q = "%s %s" % (q, v)
|
||||
elif k == 'exact_phrase':
|
||||
q = '%s "%s"' % (q, v.strip('"'))
|
||||
elif k == 'at_least_one':
|
||||
q = '%s %s' % (q, ' '.join(['OR "%s"' % x for x in split(v)]))
|
||||
elif k == 'without_words':
|
||||
q = '%s %s' % (q, ' '.join(['-"%s"' % x for x in split(v)]))
|
||||
elif k in ('author','title', 'publisher'):
|
||||
q = '%s %s' % (q, ' '.join(['in%s:"%s"'%(k,x) for x in split(v)]))
|
||||
elif k == 'subject':
|
||||
q = '%s %s' % (q, ' '.join(['%s:"%s"' % (k,x) for x in split(v)]))
|
||||
elif k == 'isbn':
|
||||
q = '%s ISBN%s' % (q, v)
|
||||
elif k == 'issn':
|
||||
q = '%s ISSN%s' % (q,v)
|
||||
elif k == 'oclc':
|
||||
q = '%s OCLC%s' % (q,v)
|
||||
else:
|
||||
raise ValueError("Unsupported search keyword")
|
||||
return self.search(q.strip(),start_index=start_index, feed=feed,
|
||||
max_results=max_results,
|
||||
min_viewability=min_viewability)
|
||||
|
||||
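A sketch of a fielded query using the keyword arguments listed above; the query terms are placeholders.

import gdata.books.service

service = gdata.books.service.BookService(source='ExampleCo-ExampleApp-1.0')
feed = service.search_by_keyword(
    exact_phrase='moby dick',
    author='melville',
    without_words='abridged',
    max_results='5')
for entry in feed.entry:
    print(entry.to_dict().get('title'))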
def search_library(self, q, id='me', **kwargs):
|
||||
"""Like search, but in a library feed. Default is the authenticated
|
||||
user's feed. Change by setting id."""
|
||||
|
||||
if 'feed' in kwargs:
|
||||
raise ValueError("kwarg 'feed' conflicts with library_id")
|
||||
feed = LIBRARY_FEED % id
|
||||
return self.search(q, feed=feed, **kwargs)
|
||||
|
||||
def search_library_by_keyword(self, id='me', **kwargs):
|
||||
"""Hybrid of search_by_keyword and search_library
|
||||
"""
|
||||
|
||||
if 'feed' in kwargs:
|
||||
raise ValueError("kwarg 'feed' conflicts with library_id")
|
||||
feed = LIBRARY_FEED % id
|
||||
return self.search_by_keyword(feed=feed,**kwargs)
|
||||
|
||||
def search_annotations(self, q, id='me', **kwargs):
|
||||
"""Like search, but in an annotation feed. Default is the authenticated
|
||||
user's feed. Change by setting id."""
|
||||
|
||||
if 'feed' in kwargs:
|
||||
raise ValueError("kwarg 'feed' conflicts with library_id")
|
||||
feed = ANNOTATION_FEED % id
|
||||
return self.search(q, feed=feed, **kwargs)
|
||||
|
||||
def search_annotations_by_keyword(self, id='me', **kwargs):
|
||||
"""Hybrid of search_by_keyword and search_annotations
|
||||
"""
|
||||
|
||||
if 'feed' in kwargs:
|
||||
raise ValueError("kwarg 'feed' conflicts with library_id")
|
||||
feed = ANNOTATION_FEED % id
|
||||
return self.search_by_keyword(feed=feed,**kwargs)
|
||||
|
||||
def add_item_to_library(self, item):
|
||||
"""Add the item, either an XML string or books.Book instance, to the
|
||||
user's library feed"""
|
||||
|
||||
feed = LIBRARY_FEED % 'me'
|
||||
return self.Post(data=item, uri=feed, converter=books.Book.FromString)
|
||||
|
||||
def remove_item_from_library(self, item):
|
||||
"""
|
||||
Remove the item, a books.Book instance, from the authenticated user's
|
||||
library feed. Using an item retrieved from a public search will fail.
|
||||
"""
|
||||
|
||||
return self.Delete(item.GetEditLink().href)
|
||||
|
||||
def add_annotation(self, item):
|
||||
"""
|
||||
Add the item, either an XML string or books.Book instance, to the
|
||||
user's annotation feed.
|
||||
"""
|
||||
# do not use GetAnnotationLink, results in 400 Bad URI due to www
|
||||
return self.Post(data=item, uri=ANNOTATION_FEED % 'me',
|
||||
converter=books.Book.FromString)
|
||||
|
||||
def edit_annotation(self, item):
|
||||
"""
|
||||
Send an edited item, a books.Book instance, to the user's annotation
|
||||
feed. Note that whereas extra annotations in add_annotation, minus
|
||||
ratings which are immutable once set, are simply added to the item in
|
||||
the annotation feed, if an annotation has been removed from the item,
|
||||
sending an edit request will remove that annotation. This should not
|
||||
happen with add_annotation.
|
||||
"""
|
||||
|
||||
return self.Put(data=item, uri=item.GetEditLink().href,
|
||||
converter=books.Book.FromString)
|
||||
|
||||
def get_by_google_id(self, id):
|
||||
return self.Get(ITEM_FEED + id, converter=books.Book.FromString)
|
||||
|
||||
def get_library(self, id='me',feed=LIBRARY_FEED, start_index="1",
|
||||
max_results="100", min_viewability="none",
|
||||
converter=books.BookFeed.FromString):
|
||||
"""
|
||||
Return a generator object that yields books.Book instances until
|
||||
the search feed no longer returns a next link from the GetNextLink method.
|
||||
Thus max_results is not the maximum number of items that will be
|
||||
returned, but rather the number of items per page of searches. This has
|
||||
been set high to reduce the required number of network requests.
|
||||
"""
|
||||
|
||||
q = gdata.service.Query()
|
||||
q.feed = feed % id
|
||||
q['start-index'] = start_index
|
||||
q['max-results'] = max_results
|
||||
q['min-viewability'] = min_viewability
|
||||
x = self.Get(uri=q.ToUri(), converter=converter)
|
||||
while 1:
|
||||
for entry in x.entry:
|
||||
yield entry
|
||||
else:
|
||||
l = x.GetNextLink()
|
||||
if l: # hope the server preserves our preferences
|
||||
x = self.Get(uri=l.href, converter=converter)
|
||||
else:
|
||||
break
|
||||
|
||||
def get_annotations(self, id='me', start_index="1", max_results="100",
|
||||
min_viewability="none", converter=books.BookFeed.FromString):
|
||||
"""
|
||||
Like get_library, but for the annotation feed
|
||||
"""
|
||||
|
||||
return self.get_library(id=id, feed=ANNOTATION_FEED,
|
||||
max_results=max_results, min_viewability = min_viewability,
|
||||
converter=converter)
|
||||
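A sketch of walking the library generator above. The 'me' feeds require authentication, so the credentials are placeholders, and ClientLogin comes from the base GDataService rather than this diff.

import gdata.books.service

service = gdata.books.service.BookService(source='ExampleCo-ExampleApp-1.0')
service.ClientLogin('user@example.com', 'password')  # placeholders

# get_library pages through the feed via GetNextLink behind the scenes.
for book in service.get_library():
    print(book.to_dict().get('title'))

# The annotation feed works the same way:
for book in service.get_annotations():
    print('%s %s' % (book.get_label(), book.to_dict().get('title')))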
1044
python/gdata/calendar/__init__.py
Normal file
File diff suppressed because it is too large
300
python/gdata/calendar/data.py
Normal file
@@ -0,0 +1,300 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Contains the data classes of the Google Calendar Data API"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
import atom.data
|
||||
import gdata.acl.data
|
||||
import gdata.data
|
||||
import gdata.geo.data
|
||||
import gdata.opensearch.data
|
||||
|
||||
|
||||
GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005/}%s'
|
||||
|
||||
|
||||
class AccessLevelProperty(atom.core.XmlElement):
|
||||
"""Describes how much a given user may do with an event or calendar"""
|
||||
_qname = GCAL_TEMPLATE % 'accesslevel'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class AllowGSync2Property(atom.core.XmlElement):
|
||||
"""Whether the user is permitted to run Google Apps Sync"""
|
||||
_qname = GCAL_TEMPLATE % 'allowGSync2'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class AllowGSyncProperty(atom.core.XmlElement):
|
||||
"""Whether the user is permitted to run Google Apps Sync"""
|
||||
_qname = GCAL_TEMPLATE % 'allowGSync'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class AnyoneCanAddSelfProperty(atom.core.XmlElement):
|
||||
"""Whether anyone can add self as attendee"""
|
||||
_qname = GCAL_TEMPLATE % 'anyoneCanAddSelf'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class CalendarAclRole(gdata.acl.data.AclRole):
|
||||
"""Describes the Calendar roles of an entry in the Calendar access control list"""
|
||||
_qname = gdata.acl.data.GACL_TEMPLATE % 'role'
|
||||
|
||||
|
||||
class CalendarCommentEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry in a feed of a Calendar event's comments"""
|
||||
|
||||
|
||||
class CalendarCommentFeed(gdata.data.GDFeed):
|
||||
"""Describes feed of a Calendar event's comments"""
|
||||
entry = [CalendarCommentEntry]
|
||||
|
||||
|
||||
class CalendarComments(gdata.data.Comments):
|
||||
"""Describes a container of a feed link for Calendar comment entries"""
|
||||
_qname = gdata.data.GD_TEMPLATE % 'comments'
|
||||
|
||||
|
||||
class CalendarExtendedProperty(gdata.data.ExtendedProperty):
|
||||
"""Defines a value for the realm attribute that is used only in the calendar API"""
|
||||
_qname = gdata.data.GD_TEMPLATE % 'extendedProperty'
|
||||
|
||||
|
||||
class CalendarWhere(gdata.data.Where):
|
||||
"""Extends the base Where class with Calendar extensions"""
|
||||
_qname = gdata.data.GD_TEMPLATE % 'where'
|
||||
|
||||
|
||||
class ColorProperty(atom.core.XmlElement):
|
||||
"""Describes the color of a calendar"""
|
||||
_qname = GCAL_TEMPLATE % 'color'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class GuestsCanInviteOthersProperty(atom.core.XmlElement):
|
||||
"""Whether guests can invite others to the event"""
|
||||
_qname = GCAL_TEMPLATE % 'guestsCanInviteOthers'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class GuestsCanModifyProperty(atom.core.XmlElement):
|
||||
"""Whether guests can modify event"""
|
||||
_qname = GCAL_TEMPLATE % 'guestsCanModify'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class GuestsCanSeeGuestsProperty(atom.core.XmlElement):
|
||||
"""Whether guests can see other attendees"""
|
||||
_qname = GCAL_TEMPLATE % 'guestsCanSeeGuests'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class HiddenProperty(atom.core.XmlElement):
|
||||
"""Describes whether a calendar is hidden"""
|
||||
_qname = GCAL_TEMPLATE % 'hidden'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class IcalUIDProperty(atom.core.XmlElement):
|
||||
"""Describes the UID in the ical export of the event"""
|
||||
_qname = GCAL_TEMPLATE % 'uid'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class OverrideNameProperty(atom.core.XmlElement):
|
||||
"""Describes the override name property of a calendar"""
|
||||
_qname = GCAL_TEMPLATE % 'overridename'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class PrivateCopyProperty(atom.core.XmlElement):
|
||||
"""Indicates whether this is a private copy of the event, changes to which should not be sent to other calendars"""
|
||||
_qname = GCAL_TEMPLATE % 'privateCopy'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class QuickAddProperty(atom.core.XmlElement):
|
||||
"""Describes whether gd:content is for quick-add processing"""
|
||||
_qname = GCAL_TEMPLATE % 'quickadd'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class ResourceProperty(atom.core.XmlElement):
|
||||
"""Describes whether gd:who is a resource such as a conference room"""
|
||||
_qname = GCAL_TEMPLATE % 'resource'
|
||||
value = 'value'
|
||||
id = 'id'
|
||||
|
||||
|
||||
class EventWho(gdata.data.Who):
|
||||
"""Extends the base Who class with Calendar extensions"""
|
||||
_qname = gdata.data.GD_TEMPLATE % 'who'
|
||||
resource = ResourceProperty
|
||||
|
||||
|
||||
class SelectedProperty(atom.core.XmlElement):
|
||||
"""Describes whether a calendar is selected"""
|
||||
_qname = GCAL_TEMPLATE % 'selected'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class SendAclNotificationsProperty(atom.core.XmlElement):
|
||||
"""Describes whether to send ACL notifications to grantees"""
|
||||
_qname = GCAL_TEMPLATE % 'sendAclNotifications'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class CalendarAclEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry in a feed of a Calendar access control list (ACL)"""
|
||||
send_acl_notifications = SendAclNotificationsProperty
|
||||
|
||||
|
||||
class CalendarAclFeed(gdata.data.GDFeed):
|
||||
"""Describes a Calendar access contorl list (ACL) feed"""
|
||||
entry = [CalendarAclEntry]
|
||||
|
||||
|
||||
class SendEventNotificationsProperty(atom.core.XmlElement):
|
||||
"""Describes whether to send event notifications to other participants of the event"""
|
||||
_qname = GCAL_TEMPLATE % 'sendEventNotifications'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class SequenceNumberProperty(atom.core.XmlElement):
|
||||
"""Describes sequence number of an event"""
|
||||
_qname = GCAL_TEMPLATE % 'sequence'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class CalendarRecurrenceExceptionEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry used by a Calendar recurrence exception entry link"""
|
||||
uid = IcalUIDProperty
|
||||
sequence = SequenceNumberProperty
|
||||
|
||||
|
||||
class CalendarRecurrenceException(gdata.data.RecurrenceException):
|
||||
"""Describes an exception to a recurring Calendar event"""
|
||||
_qname = gdata.data.GD_TEMPLATE % 'recurrenceException'
|
||||
|
||||
|
||||
class SettingsProperty(atom.core.XmlElement):
|
||||
"""User preference name-value pair"""
|
||||
_qname = GCAL_TEMPLATE % 'settingsProperty'
|
||||
name = 'name'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class SettingsEntry(gdata.data.GDEntry):
|
||||
"""Describes a Calendar Settings property entry"""
|
||||
settings_property = SettingsProperty
|
||||
|
||||
|
||||
class CalendarSettingsFeed(gdata.data.GDFeed):
|
||||
"""Personal settings for Calendar application"""
|
||||
entry = [SettingsEntry]
|
||||
|
||||
|
||||
class SuppressReplyNotificationsProperty(atom.core.XmlElement):
|
||||
"""Lists notification methods to be suppressed for this reply"""
|
||||
_qname = GCAL_TEMPLATE % 'suppressReplyNotifications'
|
||||
methods = 'methods'
|
||||
|
||||
|
||||
class SyncEventProperty(atom.core.XmlElement):
|
||||
"""Describes whether this is a sync scenario where the Ical UID and Sequence number are honored during inserts and updates"""
|
||||
_qname = GCAL_TEMPLATE % 'syncEvent'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class CalendarEventEntry(gdata.data.BatchEntry):
|
||||
"""Describes a Calendar event entry"""
|
||||
quickadd = QuickAddProperty
|
||||
send_event_notifications = SendEventNotificationsProperty
|
||||
sync_event = SyncEventProperty
|
||||
anyone_can_add_self = AnyoneCanAddSelfProperty
|
||||
extended_property = [CalendarExtendedProperty]
|
||||
sequence = SequenceNumberProperty
|
||||
guests_can_invite_others = GuestsCanInviteOthersProperty
|
||||
guests_can_modify = GuestsCanModifyProperty
|
||||
guests_can_see_guests = GuestsCanSeeGuestsProperty
|
||||
georss_where = gdata.geo.data.GeoRssWhere
|
||||
private_copy = PrivateCopyProperty
|
||||
suppress_reply_notifications = SuppressReplyNotificationsProperty
|
||||
uid = IcalUIDProperty
|
||||
|
||||
|
||||
class TimeZoneProperty(atom.core.XmlElement):
|
||||
"""Describes the time zone of a calendar"""
|
||||
_qname = GCAL_TEMPLATE % 'timezone'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class TimesCleanedProperty(atom.core.XmlElement):
|
||||
"""Describes how many times calendar was cleaned via Manage Calendars"""
|
||||
_qname = GCAL_TEMPLATE % 'timesCleaned'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class CalendarEntry(gdata.data.GDEntry):
|
||||
"""Describes a Calendar entry in the feed of a user's calendars"""
|
||||
timezone = TimeZoneProperty
|
||||
overridename = OverrideNameProperty
|
||||
hidden = HiddenProperty
|
||||
selected = SelectedProperty
|
||||
times_cleaned = TimesCleanedProperty
|
||||
color = ColorProperty
|
||||
where = [CalendarWhere]
|
||||
accesslevel = AccessLevelProperty
|
||||
|
||||
|
||||
class CalendarEventFeed(gdata.data.BatchFeed):
|
||||
"""Describes a Calendar event feed"""
|
||||
allow_g_sync2 = AllowGSync2Property
|
||||
timezone = TimeZoneProperty
|
||||
entry = [CalendarEventEntry]
|
||||
times_cleaned = TimesCleanedProperty
|
||||
allow_g_sync = AllowGSyncProperty
|
||||
|
||||
|
||||
class CalendarFeed(gdata.data.GDFeed):
|
||||
"""Describes a feed of Calendars"""
|
||||
entry = [CalendarEntry]
|
||||
|
||||
|
||||
class WebContentGadgetPref(atom.core.XmlElement):
|
||||
"""Describes a single web content gadget preference"""
|
||||
_qname = GCAL_TEMPLATE % 'webContentGadgetPref'
|
||||
name = 'name'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class WebContent(atom.core.XmlElement):
|
||||
"""Describes a "web content" extension"""
|
||||
_qname = GCAL_TEMPLATE % 'webContent'
|
||||
height = 'height'
|
||||
width = 'width'
|
||||
web_content_gadget_pref = [WebContentGadgetPref]
|
||||
url = 'url'
|
||||
display = 'display'
|
||||
|
||||
|
||||
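A small sketch of how these v2 data classes map onto gCal XML. It assumes the keyword constructor and to_string method of atom.core.XmlElement behave as elsewhere in the library; the exact serialized prefix and attribute order may differ.

import atom.data
import gdata.calendar.data

color = gdata.calendar.data.ColorProperty(value='#2952A3')
print(color.to_string())
# roughly: <color xmlns="http://schemas.google.com/gCal/2005/" value="#2952A3" />

event = gdata.calendar.data.CalendarEventEntry()
event.content = atom.data.Content(text='Lunch with Bob tomorrow at noon')
event.quickadd = gdata.calendar.data.QuickAddProperty(value='true')
event.send_event_notifications = (
    gdata.calendar.data.SendEventNotificationsProperty(value='true'))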
595
python/gdata/calendar/service.py
Normal file
@@ -0,0 +1,595 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2006 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""CalendarService extends the GDataService to streamline Google Calendar operations.
|
||||
|
||||
CalendarService: Provides methods to query feeds and manipulate items. Extends
|
||||
GDataService.
|
||||
|
||||
DictionaryToParamList: Function which converts a dictionary into a list of
|
||||
URL arguments (represented as strings). This is a
|
||||
utility function used in CRUD operations.
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'api.vli (Vivian Li)'
|
||||
|
||||
|
||||
import urllib
|
||||
import gdata
|
||||
import atom.service
|
||||
import gdata.service
|
||||
import gdata.calendar
|
||||
import atom
|
||||
|
||||
|
||||
DEFAULT_BATCH_URL = ('http://www.google.com/calendar/feeds/default/private'
|
||||
'/full/batch')
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class RequestError(Error):
|
||||
pass
|
||||
|
||||
|
||||
class CalendarService(gdata.service.GDataService):
|
||||
"""Client for the Google Calendar service."""
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server='www.google.com', additional_headers=None, **kwargs):
|
||||
"""Creates a client for the Google Calendar service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'www.google.com'.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service='cl', source=source,
|
||||
server=server, additional_headers=additional_headers, **kwargs)
|
||||
|
||||
def GetCalendarEventFeed(self, uri='/calendar/feeds/default/private/full'):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarEventFeedFromString)
|
||||
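A hedged sketch of the v1 client flow above; the credentials are placeholders, and ClientLogin comes from the base GDataService rather than this diff.

import gdata.calendar.service

client = gdata.calendar.service.CalendarService(source='ExampleCo-ExampleApp-1.0')
client.ClientLogin('user@example.com', 'password')  # placeholders

feed = client.GetCalendarEventFeed()  # default private feed
for event in feed.entry:
    print(event.title.text)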
|
||||
def GetCalendarEventEntry(self, uri):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarEventEntryFromString)
|
||||
|
||||
def GetCalendarListFeed(self, uri='/calendar/feeds/default/allcalendars/full'):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString)
|
||||
|
||||
def GetAllCalendarsFeed(self, uri='/calendar/feeds/default/allcalendars/full'):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString)
|
||||
|
||||
def GetOwnCalendarsFeed(self, uri='/calendar/feeds/default/owncalendars/full'):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString)
|
||||
|
||||
def GetCalendarListEntry(self, uri):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarListEntryFromString)
|
||||
|
||||
def GetCalendarAclFeed(self, uri='/calendar/feeds/default/acl/full'):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarAclFeedFromString)
|
||||
|
||||
def GetCalendarAclEntry(self, uri):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarAclEntryFromString)
|
||||
|
||||
def GetCalendarEventCommentFeed(self, uri):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarEventCommentFeedFromString)
|
||||
|
||||
def GetCalendarEventCommentEntry(self, uri):
|
||||
return self.Get(uri, converter=gdata.calendar.CalendarEventCommentEntryFromString)
|
||||
|
||||
def Query(self, uri, converter=None):
|
||||
"""Performs a query and returns a resulting feed or entry.
|
||||
|
||||
Args:
|
||||
uri: string The URI of the feed which is to be queried
|
||||
|
||||
Returns:
|
||||
On success, a GDataFeed or Entry depending on which is sent from the
|
||||
server.
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
if converter:
|
||||
result = self.Get(uri, converter=converter)
|
||||
else:
|
||||
result = self.Get(uri)
|
||||
return result
|
||||
|
||||
def CalendarQuery(self, query):
|
||||
if isinstance(query, CalendarEventQuery):
|
||||
return self.Query(query.ToUri(),
|
||||
converter=gdata.calendar.CalendarEventFeedFromString)
|
||||
elif isinstance(query, CalendarListQuery):
|
||||
return self.Query(query.ToUri(),
|
||||
converter=gdata.calendar.CalendarListFeedFromString)
|
||||
elif isinstance(query, CalendarEventCommentQuery):
|
||||
return self.Query(query.ToUri(),
|
||||
converter=gdata.calendar.CalendarEventCommentFeedFromString)
|
||||
else:
|
||||
return self.Query(query.ToUri())
|
||||
|
||||
def InsertEvent(self, new_event, insert_uri, url_params=None,
|
||||
escape_params=True):
|
||||
"""Adds an event to Google Calendar.
|
||||
|
||||
Args:
|
||||
new_event: atom.Entry or subclass A new event which is to be added to
|
||||
Google Calendar.
|
||||
insert_uri: the URL to post new events to the feed
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful insert, an entry containing the event created
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
return self.Post(new_event, insert_uri, url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.calendar.CalendarEventEntryFromString)
|
||||
|
||||
def InsertCalendarSubscription(self, calendar, url_params=None,
|
||||
escape_params=True):
|
||||
"""Subscribes the authenticated user to the provided calendar.
|
||||
|
||||
Args:
|
||||
calendar: The calendar to which the user should be subscribed.
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful insert, an entry containing the subscription created
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
insert_uri = '/calendar/feeds/default/allcalendars/full'
|
||||
return self.Post(calendar, insert_uri, url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.calendar.CalendarListEntryFromString)
|
||||
|
||||
def InsertCalendar(self, new_calendar, url_params=None,
|
||||
escape_params=True):
|
||||
"""Creates a new calendar.
|
||||
|
||||
Args:
|
||||
new_calendar: The calendar to be created
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful insert, an entry containing the calendar created
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
insert_uri = '/calendar/feeds/default/owncalendars/full'
|
||||
response = self.Post(new_calendar, insert_uri, url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.calendar.CalendarListEntryFromString)
|
||||
return response
|
||||
|
||||
def UpdateCalendar(self, calendar, url_params=None,
|
||||
escape_params=True):
|
||||
"""Updates a calendar.
|
||||
|
||||
Args:
|
||||
calendar: The calendar which should be updated
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful update, an entry containing the updated calendar
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
update_uri = calendar.GetEditLink().href
|
||||
response = self.Put(data=calendar, uri=update_uri, url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.calendar.CalendarListEntryFromString)
|
||||
return response
|
||||
|
||||
def InsertAclEntry(self, new_entry, insert_uri, url_params=None,
|
||||
escape_params=True):
|
||||
"""Adds an ACL entry (rule) to Google Calendar.
|
||||
|
||||
Args:
|
||||
new_entry: atom.Entry or subclass A new ACL entry which is to be added to
|
||||
Google Calendar.
|
||||
insert_uri: the URL to post new entries to the ACL feed
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful insert, an entry containing the ACL entry created
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
return self.Post(new_entry, insert_uri, url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.calendar.CalendarAclEntryFromString)
|
||||
|
||||
def InsertEventComment(self, new_entry, insert_uri, url_params=None,
|
||||
escape_params=True):
|
||||
"""Adds an entry to Google Calendar.
|
||||
|
||||
Args:
|
||||
new_entry: atom.Entry or subclass A new entry which is to be added to
|
||||
Google Calendar.
|
||||
insert_uri: the URL to post new entries to the feed
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful insert, an entry containing the comment created
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
return self.Post(new_entry, insert_uri, url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.calendar.CalendarEventCommentEntryFromString)
|
||||
|
||||
def _RemoveStandardUrlPrefix(self, url):
|
||||
url_prefix = 'http://%s/' % self.server
|
||||
if url.startswith(url_prefix):
|
||||
return url[len(url_prefix) - 1:]
|
||||
return url
|
||||
|
||||
def DeleteEvent(self, edit_uri, extra_headers=None,
|
||||
url_params=None, escape_params=True):
|
||||
"""Removes an event with the specified ID from Google Calendar.
|
||||
|
||||
Args:
|
||||
edit_uri: string The edit URL of the entry to be deleted. Example:
|
||||
'http://www.google.com/calendar/feeds/default/private/full/abx'
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the deletion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful delete, a httplib.HTTPResponse containing the server's
|
||||
response to the DELETE request.
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
edit_uri = self._RemoveStandardUrlPrefix(edit_uri)
|
||||
return self.Delete('%s' % edit_uri,
|
||||
url_params=url_params, escape_params=escape_params)
|
||||
|
||||
def DeleteAclEntry(self, edit_uri, extra_headers=None,
|
||||
url_params=None, escape_params=True):
|
||||
"""Removes an ACL entry at the given edit_uri from Google Calendar.
|
||||
|
||||
Args:
|
||||
edit_uri: string The edit URL of the entry to be deleted. Example:
|
||||
'http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the deletion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful delete, a httplib.HTTPResponse containing the server's
|
||||
response to the DELETE request.
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
edit_uri = self._RemoveStandardUrlPrefix(edit_uri)
|
||||
return self.Delete('%s' % edit_uri,
|
||||
url_params=url_params, escape_params=escape_params)
|
||||
|
||||
def DeleteCalendarEntry(self, edit_uri, extra_headers=None,
|
||||
url_params=None, escape_params=True):
|
||||
"""Removes a calendar entry at the given edit_uri from Google Calendar.
|
||||
|
||||
Args:
|
||||
edit_uri: string The edit URL of the entry to be deleted. Example:
|
||||
'http://www.google.com/calendar/feeds/default/allcalendars/abcdef@group.calendar.google.com'
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the deletion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful delete, True is returned
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
return self.Delete(edit_uri, url_params=url_params,
|
||||
escape_params=escape_params)
|
||||
|
||||
def UpdateEvent(self, edit_uri, updated_event, url_params=None,
|
||||
escape_params=True):
|
||||
"""Updates an existing event.
|
||||
|
||||
Args:
|
||||
edit_uri: string The edit link URI for the element being updated
|
||||
updated_event: string, atom.Entry, or subclass containing
|
||||
the Atom Entry which will replace the event which is
|
||||
stored at the edit_url
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the update request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful update, a httplib.HTTPResponse containing the server's
|
||||
response to the PUT request.
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
edit_uri = self._RemoveStandardUrlPrefix(edit_uri)
|
||||
return self.Put(updated_event, '%s' % edit_uri,
|
||||
url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.calendar.CalendarEventEntryFromString)
|
||||
|
||||
def UpdateAclEntry(self, edit_uri, updated_rule, url_params=None,
|
||||
escape_params=True):
|
||||
"""Updates an existing ACL rule.
|
||||
|
||||
Args:
|
||||
edit_uri: string The edit link URI for the element being updated
|
||||
updated_rule: string, atom.Entry, or subclass containing
|
||||
the Atom Entry which will replace the ACL rule which is
|
||||
stored at the edit_url
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the update request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful update, a httplib.HTTPResponse containing the server's
|
||||
response to the PUT request.
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
edit_uri = self._RemoveStandardUrlPrefix(edit_uri)
|
||||
return self.Put(updated_rule, '%s' % edit_uri,
|
||||
url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.calendar.CalendarAclEntryFromString)
|
||||
|
||||
def ExecuteBatch(self, batch_feed, url,
|
||||
converter=gdata.calendar.CalendarEventFeedFromString):
|
||||
"""Sends a batch request feed to the server.
|
||||
|
||||
The batch request needs to be sent to the batch URL for a particular
|
||||
calendar. You can find the URL by calling GetBatchLink().href on the
|
||||
CalendarEventFeed.
|
||||
|
||||
Args:
|
||||
batch_feed: gdata.calendar.CalendarEventFeed A feed containing batch
|
||||
request entries. Each entry contains the operation to be performed
|
||||
on the data contained in the entry. For example an entry with an
|
||||
operation type of insert will be used as if the individual entry
|
||||
had been inserted.
|
||||
url: str The batch URL for the Calendar to which these operations should
|
||||
be applied.
|
||||
converter: Function (optional) The function used to convert the server's
|
||||
response to an object. The default value is
|
||||
CalendarEventFeedFromString.
|
||||
|
||||
Returns:
|
||||
The results of the batch request's execution on the server. If the
|
||||
default converter is used, this is stored in a CalendarEventFeed.
|
||||
"""
|
||||
return self.Post(batch_feed, url, converter=converter)
|
||||
|
||||
|
||||
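# --- Illustrative sketch, not part of the original module ---
# A minimal example of the batch flow that ExecuteBatch above expects. It
# assumes `calendar_service` is an already authenticated CalendarService and
# that gdata.calendar.CalendarEventFeed supports AddDelete (via
# gdata.BatchFeed); the batch URL is taken from an existing feed, as the
# ExecuteBatch docstring notes.
def _example_batch_delete_first(calendar_service):
  import gdata.calendar
  private_feed = calendar_service.GetCalendarEventFeed()
  request_feed = gdata.calendar.CalendarEventFeed()
  # Queue a single delete operation for the first event in the private feed.
  request_feed.AddDelete(entry=private_feed.entry[0])
  batch_url = private_feed.GetBatchLink().href
  return calendar_service.ExecuteBatch(request_feed, batch_url)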
class CalendarEventQuery(gdata.service.Query):
|
||||
|
||||
def __init__(self, user='default', visibility='private', projection='full',
|
||||
text_query=None, params=None, categories=None):
|
||||
gdata.service.Query.__init__(self,
|
||||
feed='http://www.google.com/calendar/feeds/%s/%s/%s' % (
|
||||
urllib.quote(user),
|
||||
urllib.quote(visibility),
|
||||
urllib.quote(projection)),
|
||||
text_query=text_query, params=params, categories=categories)
|
||||
|
||||
def _GetStartMin(self):
|
||||
if 'start-min' in self.keys():
|
||||
return self['start-min']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetStartMin(self, val):
|
||||
self['start-min'] = val
|
||||
|
||||
start_min = property(_GetStartMin, _SetStartMin,
|
||||
doc="""The start-min query parameter""")
|
||||
|
||||
def _GetStartMax(self):
|
||||
if 'start-max' in self.keys():
|
||||
return self['start-max']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetStartMax(self, val):
|
||||
self['start-max'] = val
|
||||
|
||||
start_max = property(_GetStartMax, _SetStartMax,
|
||||
doc="""The start-max query parameter""")
|
||||
|
||||
def _GetOrderBy(self):
|
||||
if 'orderby' in self.keys():
|
||||
return self['orderby']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetOrderBy(self, val):
|
||||
if val != 'lastmodified' and val != 'starttime':
|
||||
raise Error, "Order By must be either 'lastmodified' or 'starttime'"
|
||||
self['orderby'] = val
|
||||
|
||||
orderby = property(_GetOrderBy, _SetOrderBy,
|
||||
doc="""The orderby query parameter""")
|
||||
|
||||
def _GetSortOrder(self):
|
||||
if 'sortorder' in self.keys():
|
||||
return self['sortorder']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetSortOrder(self, val):
|
||||
if (val != 'ascending' and val != 'descending'
|
||||
and val != 'a' and val != 'd' and val != 'ascend'
|
||||
and val != 'descend'):
|
||||
raise Error, "Sort order must be either ascending, ascend, " + (
|
||||
"a or descending, descend, or d")
|
||||
self['sortorder'] = val
|
||||
|
||||
sortorder = property(_GetSortOrder, _SetSortOrder,
|
||||
doc="""The sortorder query parameter""")
|
||||
|
||||
def _GetSingleEvents(self):
|
||||
if 'singleevents' in self.keys():
|
||||
return self['singleevents']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetSingleEvents(self, val):
|
||||
self['singleevents'] = val
|
||||
|
||||
singleevents = property(_GetSingleEvents, _SetSingleEvents,
|
||||
doc="""The singleevents query parameter""")
|
||||
|
||||
def _GetFutureEvents(self):
|
||||
if 'futureevents' in self.keys():
|
||||
return self['futureevents']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetFutureEvents(self, val):
|
||||
self['futureevents'] = val
|
||||
|
||||
futureevents = property(_GetFutureEvents, _SetFutureEvents,
|
||||
doc="""The futureevents query parameter""")
|
||||
|
||||
def _GetRecurrenceExpansionStart(self):
|
||||
if 'recurrence-expansion-start' in self.keys():
|
||||
return self['recurrence-expansion-start']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetRecurrenceExpansionStart(self, val):
|
||||
self['recurrence-expansion-start'] = val
|
||||
|
||||
recurrence_expansion_start = property(_GetRecurrenceExpansionStart,
|
||||
_SetRecurrenceExpansionStart,
|
||||
doc="""The recurrence-expansion-start query parameter""")
|
||||
|
||||
def _GetRecurrenceExpansionEnd(self):
|
||||
if 'recurrence-expansion-end' in self.keys():
|
||||
return self['recurrence-expansion-end']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetRecurrenceExpansionEnd(self, val):
|
||||
self['recurrence-expansion-end'] = val
|
||||
|
||||
recurrence_expansion_end = property(_GetRecurrenceExpansionEnd,
|
||||
_SetRecurrenceExpansionEnd,
|
||||
doc="""The recurrence-expansion-end query parameter""")
|
||||
|
||||
def _SetTimezone(self, val):
|
||||
self['ctz'] = val
|
||||
|
||||
def _GetTimezone(self):
|
||||
if 'ctz' in self.keys():
|
||||
return self['ctz']
|
||||
else:
|
||||
return None
|
||||
|
||||
ctz = property(_GetTimezone, _SetTimezone,
|
||||
doc="""The ctz query parameter which sets report time on the server.""")
|
||||
|
||||
|
||||
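# --- Illustrative sketch, not part of the original module ---
# Building a date-range query with the CalendarEventQuery class above and
# running it through CalendarService.CalendarQuery. Assumes
# `calendar_service` is an already authenticated CalendarService; the dates
# are placeholders.
def _example_date_range_query(calendar_service):
  query = CalendarEventQuery('default', 'private', 'full')
  query.start_min = '2009-01-01'
  query.start_max = '2009-02-01'
  query.orderby = 'starttime'
  query.sortorder = 'ascending'
  # CalendarQuery dispatches on the query type, so this returns a
  # gdata.calendar.CalendarEventFeed.
  feed = calendar_service.CalendarQuery(query)
  for event in feed.entry:
    print event.title.text
  return feed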
class CalendarListQuery(gdata.service.Query):
|
||||
"""Queries the Google Calendar meta feed"""
|
||||
|
||||
def __init__(self, userId=None, text_query=None,
|
||||
params=None, categories=None):
|
||||
if userId is None:
|
||||
userId = 'default'
|
||||
|
||||
gdata.service.Query.__init__(self, feed='http://www.google.com/calendar/feeds/'
|
||||
+userId,
|
||||
text_query=text_query, params=params,
|
||||
categories=categories)
|
||||
|
||||
class CalendarEventCommentQuery(gdata.service.Query):
|
||||
"""Queries the Google Calendar event comments feed"""
|
||||
|
||||
def __init__(self, feed=None):
|
||||
gdata.service.Query.__init__(self, feed=feed)
|
||||
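# --- Illustrative sketch, not part of the original module ---
# End-to-end use of the CalendarService defined in this file: authenticate,
# insert an event, rename it, then delete it. The credentials and event data
# are placeholders.
def _example_event_lifecycle():
  import atom
  import gdata.calendar

  calendar_service = CalendarService()
  calendar_service.email = 'user@example.com'
  calendar_service.password = 'password'
  calendar_service.source = 'example-calendar-sample'
  calendar_service.ProgrammaticLogin()

  event = gdata.calendar.CalendarEventEntry()
  event.title = atom.Title(text='Tennis with Beth')
  event.when.append(gdata.calendar.When(
      start_time='2009-03-12T17:00:00.000Z',
      end_time='2009-03-12T18:00:00.000Z'))
  new_event = calendar_service.InsertEvent(
      event, '/calendar/feeds/default/private/full')

  new_event.title = atom.Title(text='Tennis with Bob')
  updated = calendar_service.UpdateEvent(
      new_event.GetEditLink().href, new_event)

  calendar_service.DeleteEvent(updated.GetEditLink().href)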
1
python/gdata/calendar_resource/__init__.py
Normal file
1
python/gdata/calendar_resource/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
200
python/gdata/calendar_resource/client.py
Normal file
200
python/gdata/calendar_resource/client.py
Normal file
@@ -0,0 +1,200 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""CalendarResourceClient simplifies Calendar Resources API calls.
|
||||
|
||||
CalendarResourceClient extends gdata.client.GDClient to ease interaction with
|
||||
the Google Apps Calendar Resources API. These interactions include the ability
|
||||
to create, retrieve, update, and delete calendar resources in a Google Apps
|
||||
domain.
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'Vic Fryzel <vf@google.com>'
|
||||
|
||||
|
||||
import gdata.calendar_resource.data
|
||||
import gdata.client
|
||||
import urllib
|
||||
|
||||
|
||||
# Feed URI template. This must end with a /
|
||||
# The strings in this template are eventually replaced with the API version
|
||||
# and Google Apps domain name, respectively.
|
||||
RESOURCE_FEED_TEMPLATE = '/a/feeds/calendar/resource/%s/%s/'
|
||||
|
||||
|
||||
class CalendarResourceClient(gdata.client.GDClient):
|
||||
"""Client extension for the Google Calendar Resource API service.
|
||||
|
||||
Attributes:
|
||||
host: string The hostname for the Calendar Resource API service.
|
||||
api_version: string The version of the Calendar Resource API.
|
||||
"""
|
||||
|
||||
host = 'apps-apis.google.com'
|
||||
api_version = '2.0'
|
||||
auth_service = 'apps'
|
||||
auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
|
||||
ssl = True
|
||||
|
||||
def __init__(self, domain, auth_token=None, **kwargs):
|
||||
"""Constructs a new client for the Calendar Resource API.
|
||||
|
||||
Args:
|
||||
domain: string The Google Apps domain with Calendar Resources.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the calendar resource
|
||||
data.
|
||||
kwargs: The other parameters to pass to the gdata.client.GDClient
|
||||
constructor.
|
||||
"""
|
||||
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
|
||||
self.domain = domain
|
||||
|
||||
def make_resource_feed_uri(self, resource_id=None, params=None):
|
||||
"""Creates a resource feed URI for the Calendar Resource API.
|
||||
|
||||
Using this client's Google Apps domain, create a feed URI for calendar
|
||||
resources in that domain. If a resource_id is provided, return a URI
|
||||
for that specific resource. If params are provided, append them as GET
|
||||
params.
|
||||
|
||||
Args:
|
||||
resource_id: string (optional) The ID of the calendar resource for which
|
||||
to make a feed URI.
|
||||
params: dict (optional) key -> value params to append as GET vars to the
|
||||
URI. Example: params={'start': 'my-resource-id'}
|
||||
Returns:
|
||||
A string giving the URI for calendar resources for this client's Google
|
||||
Apps domain.
|
||||
"""
|
||||
uri = RESOURCE_FEED_TEMPLATE % (self.api_version, self.domain)
|
||||
if resource_id:
|
||||
uri += resource_id
|
||||
if params:
|
||||
uri += '?' + urllib.urlencode(params)
|
||||
return uri
|
||||
|
||||
MakeResourceFeedUri = make_resource_feed_uri
|
||||
|
||||
def get_resource_feed(self, uri=None, **kwargs):
|
||||
"""Fetches a ResourceFeed of calendar resources at the given URI.
|
||||
|
||||
Args:
|
||||
uri: string The URI of the feed to pull.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient.get_feed().
|
||||
|
||||
Returns:
|
||||
A ResourceFeed object representing the feed at the given URI.
|
||||
"""
|
||||
|
||||
if uri is None:
|
||||
uri = self.MakeResourceFeedUri()
|
||||
return self.get_feed(
|
||||
uri,
|
||||
desired_class=gdata.calendar_resource.data.CalendarResourceFeed,
|
||||
**kwargs)
|
||||
|
||||
GetResourceFeed = get_resource_feed
|
||||
|
||||
def get_resource(self, uri=None, resource_id=None, **kwargs):
|
||||
"""Fetches a single calendar resource by resource ID.
|
||||
|
||||
Args:
|
||||
uri: string The base URI of the feed from which to fetch the resource.
|
||||
resource_id: string The string ID of the Resource to fetch.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient.get_entry().
|
||||
|
||||
Returns:
|
||||
A Resource object representing the calendar resource with the given
|
||||
base URI and resource ID.
|
||||
"""
|
||||
|
||||
if uri is None:
|
||||
uri = self.MakeResourceFeedUri(resource_id)
|
||||
return self.get_entry(
|
||||
uri,
|
||||
desired_class=gdata.calendar_resource.data.CalendarResourceEntry,
|
||||
**kwargs)
|
||||
|
||||
GetResource = get_resource
|
||||
|
||||
def create_resource(self, resource_id, resource_common_name=None,
|
||||
resource_description=None, resource_type=None, **kwargs):
|
||||
"""Creates a calendar resource with the given properties.
|
||||
|
||||
Args:
|
||||
resource_id: string The resource ID of the calendar resource.
|
||||
resource_common_name: string (optional) The common name of the resource.
|
||||
resource_description: string (optional) The description of the resource.
|
||||
resource_type: string (optional) The type of the resource.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient.post().
|
||||
|
||||
Returns:
|
||||
gdata.calendar_resource.data.CalendarResourceEntry of the new resource.
|
||||
"""
|
||||
new_resource = gdata.calendar_resource.data.CalendarResourceEntry(
|
||||
resource_id=resource_id,
|
||||
resource_common_name=resource_common_name,
|
||||
resource_description=resource_description,
|
||||
resource_type=resource_type)
|
||||
return self.post(new_resource, self.MakeResourceFeedUri(), **kwargs)
|
||||
|
||||
CreateResource = create_resource
|
||||
|
||||
def update_resource(self, resource_id, resource_common_name=None,
|
||||
resource_description=None, resource_type=None, **kwargs):
|
||||
"""Updates the calendar resource with the given resource ID.
|
||||
|
||||
Args:
|
||||
resource_id: string The resource ID of the calendar resource to update.
|
||||
resource_common_name: string (optional) The common name to give the
|
||||
resource.
|
||||
resource_description: string (optional) The description to give the
|
||||
resource.
|
||||
resource_type: string (optional) The type to give the resource.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient.update().
|
||||
|
||||
Returns:
|
||||
gdata.calendar_resource.data.CalendarResourceEntry of the updated
|
||||
resource.
|
||||
"""
|
||||
new_resource = gdata.calendar_resource.data.CalendarResourceEntry(
|
||||
resource_id=resource_id,
|
||||
resource_common_name=resource_common_name,
|
||||
resource_description=resource_description,
|
||||
resource_type=resource_type)
|
||||
return self.update(
|
||||
new_resource,
|
||||
**kwargs)
|
||||
|
||||
UpdateResource = update_resource
|
||||
|
||||
def delete_resource(self, resource_id, **kwargs):
|
||||
"""Deletes the calendar resource with the given resource ID.
|
||||
|
||||
Args:
|
||||
resource_id: string The resource ID of the calendar resource to delete.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient.delete()
|
||||
|
||||
Returns:
|
||||
An HTTP response object. See gdata.client.request().
|
||||
"""
|
||||
|
||||
return self.delete(self.MakeResourceFeedUri(resource_id), **kwargs)
|
||||
|
||||
DeleteResource = delete_resource
|
||||
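# --- Illustrative sketch, not part of the original module ---
# Typical use of the CalendarResourceClient above: authenticate as a domain
# administrator, create a resource, list the domain's resources, then delete
# one. The domain, credentials and resource values are placeholders;
# ClientLogin is assumed to be available through gdata.client.GDClient.
def _example_manage_resources():
  client = CalendarResourceClient(domain='example.com')
  client.ClientLogin(email='admin@example.com', password='password',
                     source='example-resource-sample')

  client.CreateResource(
      resource_id='CR-NYC-14-12-BR',
      resource_common_name='Boardroom',
      resource_description='Top floor, projector',
      resource_type='CR')

  feed = client.GetResourceFeed()
  for entry in feed.entry:
    print entry.resource_id, entry.resource_common_name

  client.DeleteResource('CR-NYC-14-12-BR')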
193
python/gdata/calendar_resource/data.py
Normal file
193
python/gdata/calendar_resource/data.py
Normal file
@@ -0,0 +1,193 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Data model for parsing and generating XML for the Calendar Resource API."""
|
||||
|
||||
|
||||
__author__ = 'Vic Fryzel <vf@google.com>'
|
||||
|
||||
|
||||
import atom.core
|
||||
import atom.data
|
||||
import gdata.apps
|
||||
import gdata.apps_property
|
||||
import gdata.data
|
||||
|
||||
|
||||
# This is required to work around a naming conflict between the Google
|
||||
# Spreadsheets API and Python's built-in property function
|
||||
pyproperty = property
|
||||
|
||||
|
||||
# The apps:property name of the resourceId property
|
||||
RESOURCE_ID_NAME = 'resourceId'
|
||||
# The apps:property name of the resourceCommonName property
|
||||
RESOURCE_COMMON_NAME_NAME = 'resourceCommonName'
|
||||
# The apps:property name of the resourceDescription property
|
||||
RESOURCE_DESCRIPTION_NAME = 'resourceDescription'
|
||||
# The apps:property name of the resourceType property
|
||||
RESOURCE_TYPE_NAME = 'resourceType'
|
||||
|
||||
|
||||
class CalendarResourceEntry(gdata.data.GDEntry):
|
||||
"""Represents a Calendar Resource entry in object form."""
|
||||
|
||||
property = [gdata.apps_property.AppsProperty]
|
||||
|
||||
def _GetProperty(self, name):
|
||||
"""Get the apps:property value with the given name.
|
||||
|
||||
Args:
|
||||
name: string Name of the apps:property value to get.
|
||||
|
||||
Returns:
|
||||
The apps:property value with the given name, or None if the name was
|
||||
invalid.
|
||||
"""
|
||||
|
||||
for p in self.property:
|
||||
if p.name == name:
|
||||
return p.value
|
||||
return None
|
||||
|
||||
def _SetProperty(self, name, value):
|
||||
"""Set the apps:property value with the given name to the given value.
|
||||
|
||||
Args:
|
||||
name: string Name of the apps:property value to set.
|
||||
value: string Value to give the apps:property value with the given name.
|
||||
"""
|
||||
|
||||
for i in range(len(self.property)):
|
||||
if self.property[i].name == name:
|
||||
self.property[i].value = value
|
||||
return
|
||||
self.property.append(gdata.apps_property.AppsProperty(name=name, value=value))
|
||||
|
||||
def GetResourceId(self):
|
||||
"""Get the resource ID of this Calendar Resource object.
|
||||
|
||||
Returns:
|
||||
The resource ID of this Calendar Resource object as a string or None.
|
||||
"""
|
||||
|
||||
return self._GetProperty(RESOURCE_ID_NAME)
|
||||
|
||||
def SetResourceId(self, value):
|
||||
"""Set the resource ID of this Calendar Resource object.
|
||||
|
||||
Args:
|
||||
value: string The new resource ID value to give this object.
|
||||
"""
|
||||
|
||||
self._SetProperty(RESOURCE_ID_NAME, value)
|
||||
|
||||
resource_id = pyproperty(GetResourceId, SetResourceId)
|
||||
|
||||
def GetResourceCommonName(self):
|
||||
"""Get the common name of this Calendar Resource object.
|
||||
|
||||
Returns:
|
||||
The common name of this Calendar Resource object as a string or None.
|
||||
"""
|
||||
|
||||
return self._GetProperty(RESOURCE_COMMON_NAME_NAME)
|
||||
|
||||
def SetResourceCommonName(self, value):
|
||||
"""Set the common name of this Calendar Resource object.
|
||||
|
||||
Args:
|
||||
value: string The new common name value to give this object.
|
||||
"""
|
||||
|
||||
self._SetProperty(RESOURCE_COMMON_NAME_NAME, value)
|
||||
|
||||
resource_common_name = pyproperty(
|
||||
GetResourceCommonName,
|
||||
SetResourceCommonName)
|
||||
|
||||
def GetResourceDescription(self):
|
||||
"""Get the description of this Calendar Resource object.
|
||||
|
||||
Returns:
|
||||
The description of this Calendar Resource object as a string or None.
|
||||
"""
|
||||
|
||||
return self._GetProperty(RESOURCE_DESCRIPTION_NAME)
|
||||
|
||||
def SetResourceDescription(self, value):
|
||||
"""Set the description of this Calendar Resource object.
|
||||
|
||||
Args:
|
||||
value: string The new description value to give this object.
|
||||
"""
|
||||
|
||||
self._SetProperty(RESOURCE_DESCRIPTION_NAME, value)
|
||||
|
||||
resource_description = pyproperty(
|
||||
GetResourceDescription,
|
||||
SetResourceDescription)
|
||||
|
||||
def GetResourceType(self):
|
||||
"""Get the type of this Calendar Resource object.
|
||||
|
||||
Returns:
|
||||
The type of this Calendar Resource object as a string or None.
|
||||
"""
|
||||
|
||||
return self._GetProperty(RESOURCE_TYPE_NAME)
|
||||
|
||||
def SetResourceType(self, value):
|
||||
"""Set the type value of this Calendar Resource object.
|
||||
|
||||
Args:
|
||||
value: string The new type value to give this object.
|
||||
"""
|
||||
|
||||
self._SetProperty(RESOURCE_TYPE_NAME, value)
|
||||
|
||||
resource_type = pyproperty(GetResourceType, SetResourceType)
|
||||
|
||||
def __init__(self, resource_id=None, resource_common_name=None,
|
||||
resource_description=None, resource_type=None, *args, **kwargs):
|
||||
"""Constructs a new CalendarResourceEntry object with the given arguments.
|
||||
|
||||
Args:
|
||||
resource_id: string (optional) The resource ID to give this new object.
|
||||
resource_common_name: string (optional) The common name to give this new
|
||||
object.
|
||||
resource_description: string (optional) The description to give this new
|
||||
object.
|
||||
resource_type: string (optional) The type to give this new object.
|
||||
args: The other parameters to pass to gdata.entry.GDEntry constructor.
|
||||
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
|
||||
"""
|
||||
super(CalendarResourceEntry, self).__init__(*args, **kwargs)
|
||||
if resource_id:
|
||||
self.resource_id = resource_id
|
||||
if resource_common_name:
|
||||
self.resource_common_name = resource_common_name
|
||||
if resource_description:
|
||||
self.resource_description = resource_description
|
||||
if resource_type:
|
||||
self.resource_type = resource_type
|
||||
|
||||
|
||||
class CalendarResourceFeed(gdata.data.GDFeed):
|
||||
"""Represents a feed of CalendarResourceEntry objects."""
|
||||
|
||||
# Override entry so that this feed knows how to type its list of entries.
|
||||
entry = [CalendarResourceEntry]
|
||||
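# --- Illustrative sketch, not part of the original module ---
# The entry class above stores its fields as apps:property elements and the
# pyproperty accessors read and write them. A quick round trip, assuming
# atom.core.XmlElement's to_string() for serialization; the values are made
# up.
def _example_entry_round_trip():
  entry = CalendarResourceEntry(
      resource_id='CR-NYC-14-12-BR',
      resource_common_name='Boardroom',
      resource_type='CR')
  entry.resource_description = 'Top floor, seats 12'  # goes through _SetProperty
  print entry.resource_id                             # read back via _GetProperty
  print entry.to_string()                             # serialized apps:property XML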
1126
python/gdata/client.py
Normal file
1126
python/gdata/client.py
Normal file
File diff suppressed because it is too large
136
python/gdata/codesearch/__init__.py
Normal file
136
python/gdata/codesearch/__init__.py
Normal file
@@ -0,0 +1,136 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2007 Benoit Chesneau <benoitc@metavers.net>
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software for any
|
||||
# purpose with or without fee is hereby granted, provided that the above
|
||||
# copyright notice and this permission notice appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
|
||||
"""Contains extensions to Atom objects used by Google Codesearch"""
|
||||
|
||||
__author__ = 'Benoit Chesneau'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
CODESEARCH_NAMESPACE='http://schemas.google.com/codesearch/2006'
|
||||
CODESEARCH_TEMPLATE='{http://schemas.google.com/codesearch/2006}%s'
|
||||
|
||||
|
||||
class Match(atom.AtomBase):
|
||||
""" The Google Codesearch match element """
|
||||
_tag = 'match'
|
||||
_namespace = CODESEARCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['lineNumber'] = 'line_number'
|
||||
_attributes['type'] = 'type'
|
||||
|
||||
def __init__(self, line_number=None, type=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.text = text
|
||||
self.type = type
|
||||
self.line_number = line_number
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class File(atom.AtomBase):
|
||||
""" The Google Codesearch file element"""
|
||||
_tag = 'file'
|
||||
_namespace = CODESEARCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['name'] = 'name'
|
||||
|
||||
def __init__(self, name=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.text = text
|
||||
self.name = name
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class Package(atom.AtomBase):
|
||||
""" The Google Codesearch package element"""
|
||||
_tag = 'package'
|
||||
_namespace = CODESEARCH_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['name'] = 'name'
|
||||
_attributes['uri'] = 'uri'
|
||||
|
||||
def __init__(self, name=None, uri=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.text = text
|
||||
self.name = name
|
||||
self.uri = uri
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class CodesearchEntry(gdata.GDataEntry):
|
||||
""" Google codesearch atom entry"""
|
||||
_tag = gdata.GDataEntry._tag
|
||||
_namespace = gdata.GDataEntry._namespace
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
|
||||
_children['{%s}file' % CODESEARCH_NAMESPACE] = ('file', File)
|
||||
_children['{%s}package' % CODESEARCH_NAMESPACE] = ('package', Package)
|
||||
_children['{%s}match' % CODESEARCH_NAMESPACE] = ('match', [Match])
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None,
|
||||
match=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
|
||||
gdata.GDataEntry.__init__(self, author=author, category=category,
|
||||
content=content, atom_id=atom_id, link=link,
|
||||
published=published, title=title,
|
||||
updated=updated, text=None)
|
||||
|
||||
self.match = match or []
|
||||
|
||||
|
||||
def CodesearchEntryFromString(xml_string):
|
||||
"""Converts an XML string into a CodesearchEntry object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Codesearch feed entry.
|
||||
|
||||
Returns:
|
||||
A CodesearchEntry object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(CodesearchEntry, xml_string)
|
||||
|
||||
|
||||
class CodesearchFeed(gdata.GDataFeed):
|
||||
"""feed containing list of Google codesearch Items"""
|
||||
_tag = gdata.GDataFeed._tag
|
||||
_namespace = gdata.GDataFeed._namespace
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CodesearchEntry])
|
||||
|
||||
|
||||
def CodesearchFeedFromString(xml_string):
|
||||
"""Converts an XML string into a CodesearchFeed object.
|
||||
Args:
|
||||
xml_string: string The XML describing a Codesearch feed.
|
||||
Returns:
|
||||
A CodesearchFeed object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(CodesearchFeed, xml_string)
|
||||
109
python/gdata/codesearch/service.py
Normal file
109
python/gdata/codesearch/service.py
Normal file
@@ -0,0 +1,109 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2007 Benoit Chesneau <benoitc@metavers.net>
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software for any
|
||||
# purpose with or without fee is hereby granted, provided that the above
|
||||
# copyright notice and this permission notice appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
|
||||
"""CodesearchService extends GDataService to streamline Google Codesearch
|
||||
operations"""
|
||||
|
||||
|
||||
__author__ = 'Benoit Chesneau'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata.service
|
||||
import gdata.codesearch
|
||||
|
||||
|
||||
class CodesearchService(gdata.service.GDataService):
|
||||
"""Client extension for Google codesearch service"""
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server='www.google.com', additional_headers=None, **kwargs):
|
||||
"""Creates a client for the Google codesearch service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'www.google.com'.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service='codesearch',
|
||||
source=source, server=server, additional_headers=additional_headers,
|
||||
**kwargs)
|
||||
|
||||
def Query(self, uri, converter=gdata.codesearch.CodesearchFeedFromString):
|
||||
"""Queries the Codesearch feed and returns the resulting feed of
|
||||
entries.
|
||||
|
||||
Args:
|
||||
uri: string The full URI to be queried. This can contain query
|
||||
parameters, a hostname, or simply the relative path to a Codesearch
|
||||
feed. The CodesearchQuery object is useful when constructing
|
||||
query parameters.
|
||||
converter: func (optional) A function which will be executed on the
|
||||
retrieved item, generally to render it into a Python object.
|
||||
By default the CodesearchFeedFromString function is used to
|
||||
return a CodesearchFeed object. This is because most feed
|
||||
queries will result in a feed and not a single entry.
|
||||
|
||||
Returns:
|
||||
A CodesearchFeed object representing the feed returned by the server
|
||||
"""
|
||||
return self.Get(uri, converter=converter)
|
||||
|
||||
def GetSnippetsFeed(self, text_query=None):
|
||||
"""Retrieve Codesearch feed for a keyword
|
||||
|
||||
Args:
|
||||
text_query: string (optional) The contents of the q query parameter. This
|
||||
string is URL escaped upon conversion to a URI.
|
||||
Returns:
|
||||
A CodesearchFeed object representing the feed returned by the server
|
||||
"""
|
||||
|
||||
query=gdata.codesearch.service.CodesearchQuery(text_query=text_query)
|
||||
feed = self.Query(query.ToUri())
|
||||
return feed
|
||||
|
||||
|
||||
class CodesearchQuery(gdata.service.Query):
|
||||
"""Object used to construct the query to the Google Codesearch feed. here only as a shorcut"""
|
||||
|
||||
def __init__(self, feed='/codesearch/feeds/search', text_query=None,
|
||||
params=None, categories=None):
|
||||
"""Constructor for Codesearch Query.
|
||||
|
||||
Args:
|
||||
feed: string (optional) The path for the feed. (e.g. '/codesearch/feeds/search')
|
||||
text_query: string (optional) The contents of the q query parameter. This
|
||||
string is URL escaped upon conversion to a URI.
|
||||
params: dict (optional) Parameter value string pairs which become URL
|
||||
params when translated to a URI. These parameters are added to
|
||||
the query's items.
|
||||
categories: list (optional) List of category strings which should be
|
||||
included as query categories. See gdata.service.Query for
|
||||
additional documentation.
|
||||
|
||||
Yields:
|
||||
A CodesearchQuery object to construct a URI based on Codesearch feed
|
||||
"""
|
||||
|
||||
gdata.service.Query.__init__(self, feed, text_query, params, categories)
|
||||
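# --- Illustrative sketch, not part of the original module ---
# Querying the Code Search service with the classes above. No authentication
# is needed for public searches; the query term is made up.
def _example_snippet_search():
  client = CodesearchService(source='example-codesearch-sample')
  feed = client.GetSnippetsFeed('malloc')
  for entry in feed.entry:
    # Each entry carries the matched file and the individual match elements.
    print entry.file.name
    for match in entry.match:
      print '  line', match.line_number
  return feed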
740
python/gdata/contacts/__init__.py
Normal file
740
python/gdata/contacts/__init__.py
Normal file
@@ -0,0 +1,740 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains extensions to ElementWrapper objects used with Google Contacts."""
|
||||
|
||||
__author__ = 'dbrattli (Dag Brattli)'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
## Constants from http://code.google.com/apis/gdata/elements.html ##
|
||||
REL_HOME = 'http://schemas.google.com/g/2005#home'
|
||||
REL_WORK = 'http://schemas.google.com/g/2005#work'
|
||||
REL_OTHER = 'http://schemas.google.com/g/2005#other'
|
||||
|
||||
# AOL Instant Messenger protocol
|
||||
IM_AIM = 'http://schemas.google.com/g/2005#AIM'
|
||||
IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol
|
||||
IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol
|
||||
IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol
|
||||
IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol
|
||||
# Google Talk protocol
|
||||
IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
|
||||
IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol
|
||||
IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol
|
||||
IM_NETMEETING = 'http://schemas.google.com/g/2005#netmeeting' # NetMeeting
|
||||
|
||||
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
|
||||
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'
|
||||
|
||||
# Different phone types, for more info see:
|
||||
# http://code.google.com/apis/gdata/docs/2.0/elements.html#gdPhoneNumber
|
||||
PHONE_CAR = 'http://schemas.google.com/g/2005#car'
|
||||
PHONE_FAX = 'http://schemas.google.com/g/2005#fax'
|
||||
PHONE_GENERAL = 'http://schemas.google.com/g/2005#general'
|
||||
PHONE_HOME = REL_HOME
|
||||
PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax'
|
||||
PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension'
|
||||
PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile'
|
||||
PHONE_OTHER = REL_OTHER
|
||||
PHONE_PAGER = 'http://schemas.google.com/g/2005#pager'
|
||||
PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite'
|
||||
PHONE_VOIP = 'http://schemas.google.com/g/2005#voip'
|
||||
PHONE_WORK = REL_WORK
|
||||
PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax'
|
||||
PHONE_WORK_MOBILE = 'http://schemas.google.com/g/2005#work_mobile'
|
||||
PHONE_WORK_PAGER = 'http://schemas.google.com/g/2005#work_pager'
|
||||
PHONE_MAIN = 'http://schemas.google.com/g/2005#main'
|
||||
PHONE_ASSISTANT = 'http://schemas.google.com/g/2005#assistant'
|
||||
PHONE_CALLBACK = 'http://schemas.google.com/g/2005#callback'
|
||||
PHONE_COMPANY_MAIN = 'http://schemas.google.com/g/2005#company_main'
|
||||
PHONE_ISDN = 'http://schemas.google.com/g/2005#isdn'
|
||||
PHONE_OTHER_FAX = 'http://schemas.google.com/g/2005#other_fax'
|
||||
PHONE_RADIO = 'http://schemas.google.com/g/2005#radio'
|
||||
PHONE_TELEX = 'http://schemas.google.com/g/2005#telex'
|
||||
PHONE_TTY_TDD = 'http://schemas.google.com/g/2005#tty_tdd'
|
||||
|
||||
EXTERNAL_ID_ORGANIZATION = 'organization'
|
||||
|
||||
RELATION_MANAGER = 'manager'
|
||||
|
||||
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
|
||||
|
||||
|
||||
class GDataBase(atom.AtomBase):
|
||||
"""The Google Contacts intermediate class from atom.AtomBase."""
|
||||
|
||||
_namespace = gdata.GDATA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
def __init__(self, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class ContactsBase(GDataBase):
|
||||
"""The Google Contacts intermediate class for Contacts namespace."""
|
||||
|
||||
_namespace = CONTACTS_NAMESPACE
|
||||
|
||||
|
||||
class OrgName(GDataBase):
|
||||
"""The Google Contacts OrgName element."""
|
||||
|
||||
_tag = 'orgName'
|
||||
|
||||
|
||||
class OrgTitle(GDataBase):
|
||||
"""The Google Contacts OrgTitle element."""
|
||||
|
||||
_tag = 'orgTitle'
|
||||
|
||||
|
||||
class OrgDepartment(GDataBase):
|
||||
"""The Google Contacts OrgDepartment element."""
|
||||
|
||||
_tag = 'orgDepartment'
|
||||
|
||||
|
||||
class OrgJobDescription(GDataBase):
|
||||
"""The Google Contacts OrgJobDescription element."""
|
||||
|
||||
_tag = 'orgJobDescription'
|
||||
|
||||
|
||||
class Where(GDataBase):
|
||||
"""The Google Contacts Where element."""
|
||||
|
||||
_tag = 'where'
|
||||
_children = GDataBase._children.copy()
|
||||
_attributes = GDataBase._attributes.copy()
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['label'] = 'label'
|
||||
_attributes['valueString'] = 'value_string'
|
||||
|
||||
def __init__(self, value_string=None, rel=None, label=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.rel = rel
|
||||
self.label = label
|
||||
self.value_string = value_string
|
||||
|
||||
|
||||
class When(GDataBase):
|
||||
"""The Google Contacts When element."""
|
||||
|
||||
_tag = 'when'
|
||||
_children = GDataBase._children.copy()
|
||||
_attributes = GDataBase._attributes.copy()
|
||||
_attributes['startTime'] = 'start_time'
|
||||
_attributes['endTime'] = 'end_time'
|
||||
_attributes['label'] = 'label'
|
||||
|
||||
def __init__(self, start_time=None, end_time=None, label=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.start_time = start_time
|
||||
self.end_time = end_time
|
||||
self.label = label
|
||||
|
||||
|
||||
class Organization(GDataBase):
|
||||
"""The Google Contacts Organization element."""
|
||||
|
||||
_tag = 'organization'
|
||||
_children = GDataBase._children.copy()
|
||||
_attributes = GDataBase._attributes.copy()
|
||||
_attributes['label'] = 'label'
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['primary'] = 'primary'
|
||||
_children['{%s}orgName' % GDataBase._namespace] = (
|
||||
'org_name', OrgName)
|
||||
_children['{%s}orgTitle' % GDataBase._namespace] = (
|
||||
'org_title', OrgTitle)
|
||||
_children['{%s}orgDepartment' % GDataBase._namespace] = (
|
||||
'org_department', OrgDepartment)
|
||||
_children['{%s}orgJobDescription' % GDataBase._namespace] = (
|
||||
'org_job_description', OrgJobDescription)
|
||||
#_children['{%s}where' % GDataBase._namespace] = ('where', Where)
|
||||
|
||||
def __init__(self, label=None, rel=None, primary='false', org_name=None,
|
||||
org_title=None, org_department=None, org_job_description=None,
|
||||
where=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.label = label
|
||||
self.rel = rel or REL_OTHER
|
||||
self.primary = primary
|
||||
self.org_name = org_name
|
||||
self.org_title = org_title
|
||||
self.org_department = org_department
|
||||
self.org_job_description = org_job_description
|
||||
self.where = where
|
||||
|
||||
|
||||
class PostalAddress(GDataBase):
|
||||
"""The Google Contacts PostalAddress element."""
|
||||
|
||||
_tag = 'postalAddress'
|
||||
_children = GDataBase._children.copy()
|
||||
_attributes = GDataBase._attributes.copy()
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['primary'] = 'primary'
|
||||
|
||||
def __init__(self, primary=None, rel=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.rel = rel or REL_OTHER
|
||||
self.primary = primary
|
||||
|
||||
|
||||
class FormattedAddress(GDataBase):
|
||||
"""The Google Contacts FormattedAddress element."""
|
||||
|
||||
_tag = 'formattedAddress'
|
||||
|
||||
|
||||
class StructuredPostalAddress(GDataBase):
|
||||
"""The Google Contacts StructuredPostalAddress element."""
|
||||
|
||||
_tag = 'structuredPostalAddress'
|
||||
_children = GDataBase._children.copy()
|
||||
_attributes = GDataBase._attributes.copy()
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['primary'] = 'primary'
|
||||
_children['{%s}formattedAddress' % GDataBase._namespace] = (
|
||||
'formatted_address', FormattedAddress)
|
||||
|
||||
def __init__(self, rel=None, primary=None,
|
||||
formatted_address=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.rel = rel or REL_OTHER
|
||||
self.primary = primary
|
||||
self.formatted_address = formatted_address
|
||||
|
||||
|
||||
class IM(GDataBase):
|
||||
"""The Google Contacts IM element."""
|
||||
|
||||
_tag = 'im'
|
||||
_children = GDataBase._children.copy()
|
||||
_attributes = GDataBase._attributes.copy()
|
||||
_attributes['address'] = 'address'
|
||||
_attributes['primary'] = 'primary'
|
||||
_attributes['protocol'] = 'protocol'
|
||||
_attributes['label'] = 'label'
|
||||
_attributes['rel'] = 'rel'
|
||||
|
||||
def __init__(self, primary='false', rel=None, address=None, protocol=None,
|
||||
label=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.protocol = protocol
|
||||
self.address = address
|
||||
self.primary = primary
|
||||
self.rel = rel or REL_OTHER
|
||||
self.label = label
|
||||
|
||||
|
||||
class Email(GDataBase):
|
||||
"""The Google Contacts Email element."""
|
||||
|
||||
_tag = 'email'
|
||||
_children = GDataBase._children.copy()
|
||||
_attributes = GDataBase._attributes.copy()
|
||||
_attributes['address'] = 'address'
|
||||
_attributes['primary'] = 'primary'
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['label'] = 'label'
|
||||
|
||||
def __init__(self, label=None, rel=None, address=None, primary='false',
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.label = label
|
||||
self.rel = rel or REL_OTHER
|
||||
self.address = address
|
||||
self.primary = primary
|
||||
|
||||
|
||||
class PhoneNumber(GDataBase):
|
||||
"""The Google Contacts PhoneNumber element."""
|
||||
|
||||
_tag = 'phoneNumber'
|
||||
_children = GDataBase._children.copy()
|
||||
_attributes = GDataBase._attributes.copy()
|
||||
_attributes['label'] = 'label'
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['uri'] = 'uri'
|
||||
_attributes['primary'] = 'primary'
|
||||
|
||||
def __init__(self, label=None, rel=None, uri=None, primary='false',
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.label = label
|
||||
self.rel = rel or REL_OTHER
|
||||
self.uri = uri
|
||||
self.primary = primary
|
||||
|
||||
|
||||
class Nickname(ContactsBase):
|
||||
"""The Google Contacts Nickname element."""
|
||||
|
||||
_tag = 'nickname'
|
||||
|
||||
|
||||
class Occupation(ContactsBase):
|
||||
"""The Google Contacts Occupation element."""
|
||||
|
||||
_tag = 'occupation'
|
||||
|
||||
|
||||
class Gender(ContactsBase):
|
||||
"""The Google Contacts Gender element."""
|
||||
|
||||
_tag = 'gender'
|
||||
_children = ContactsBase._children.copy()
|
||||
_attributes = ContactsBase._attributes.copy()
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, value=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
ContactsBase.__init__(self, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.value = value
|
||||
|
||||
|
||||
class Birthday(ContactsBase):
|
||||
"""The Google Contacts Birthday element."""
|
||||
|
||||
_tag = 'birthday'
|
||||
_children = ContactsBase._children.copy()
|
||||
_attributes = ContactsBase._attributes.copy()
|
||||
_attributes['when'] = 'when'
|
||||
|
||||
def __init__(self, when=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
ContactsBase.__init__(self, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.when = when
|
||||
|
||||
|
||||
class Relation(ContactsBase):
|
||||
"""The Google Contacts Relation element."""
|
||||
|
||||
_tag = 'relation'
|
||||
_children = ContactsBase._children.copy()
|
||||
_attributes = ContactsBase._attributes.copy()
|
||||
_attributes['label'] = 'label'
|
||||
_attributes['rel'] = 'rel'
|
||||
|
||||
def __init__(self, label=None, rel=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
ContactsBase.__init__(self, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.label = label
|
||||
self.rel = rel
|
||||
|
||||
|
||||
def RelationFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Relation, xml_string)
|
||||
|
||||
|
||||
class UserDefinedField(ContactsBase):
|
||||
"""The Google Contacts UserDefinedField element."""
|
||||
|
||||
_tag = 'userDefinedField'
|
||||
_children = ContactsBase._children.copy()
|
||||
_attributes = ContactsBase._attributes.copy()
|
||||
_attributes['key'] = 'key'
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, key=None, value=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
ContactsBase.__init__(self, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.key = key
|
||||
self.value = value
|
||||
|
||||
|
||||
def UserDefinedFieldFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(UserDefinedField, xml_string)
|
||||
|
||||
|
||||
class Website(ContactsBase):
|
||||
"""The Google Contacts Website element."""
|
||||
|
||||
_tag = 'website'
|
||||
_children = ContactsBase._children.copy()
|
||||
_attributes = ContactsBase._attributes.copy()
|
||||
_attributes['href'] = 'href'
|
||||
_attributes['label'] = 'label'
|
||||
_attributes['primary'] = 'primary'
|
||||
_attributes['rel'] = 'rel'
|
||||
|
||||
def __init__(self, href=None, label=None, primary='false', rel=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
ContactsBase.__init__(self, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.href = href
|
||||
self.label = label
|
||||
self.primary = primary
|
||||
self.rel = rel
|
||||
|
||||
|
||||
def WebsiteFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Website, xml_string)
|
||||
|
||||
|
||||
class ExternalId(ContactsBase):
|
||||
"""The Google Contacts ExternalId element."""
|
||||
|
||||
_tag = 'externalId'
|
||||
_children = ContactsBase._children.copy()
|
||||
_attributes = ContactsBase._attributes.copy()
|
||||
_attributes['label'] = 'label'
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, label=None, rel=None, value=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
ContactsBase.__init__(self, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.label = label
|
||||
self.rel = rel
|
||||
self.value = value
|
||||
|
||||
|
||||
def ExternalIdFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(ExternalId, xml_string)
|
||||
|
||||
|
||||
class Event(ContactsBase):
|
||||
"""The Google Contacts Event element."""
|
||||
|
||||
_tag = 'event'
|
||||
_children = ContactsBase._children.copy()
|
||||
_attributes = ContactsBase._attributes.copy()
|
||||
_attributes['label'] = 'label'
|
||||
_attributes['rel'] = 'rel'
|
||||
_children['{%s}when' % ContactsBase._namespace] = ('when', When)
|
||||
|
||||
def __init__(self, label=None, rel=None, when=None,
|
||||
text=None, extension_elements=None, extension_attributes=None):
|
||||
ContactsBase.__init__(self, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.label = label
|
||||
self.rel = rel
|
||||
self.when = when
|
||||
|
||||
|
||||
def EventFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Event, xml_string)
|
||||
|
||||
|
||||
class Deleted(GDataBase):
|
||||
"""The Google Contacts Deleted element."""
|
||||
|
||||
_tag = 'deleted'
|
||||
|
||||
|
||||
class GroupMembershipInfo(ContactsBase):
|
||||
"""The Google Contacts GroupMembershipInfo element."""
|
||||
|
||||
_tag = 'groupMembershipInfo'
|
||||
|
||||
_children = ContactsBase._children.copy()
|
||||
_attributes = ContactsBase._attributes.copy()
|
||||
_attributes['deleted'] = 'deleted'
|
||||
_attributes['href'] = 'href'
|
||||
|
||||
def __init__(self, deleted=None, href=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
ContactsBase.__init__(self, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
self.deleted = deleted
|
||||
self.href = href
|
||||
|
||||
|
||||
class PersonEntry(gdata.BatchEntry):
|
||||
"""Base class for ContactEntry and ProfileEntry."""
|
||||
|
||||
_children = gdata.BatchEntry._children.copy()
|
||||
_children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
|
||||
'organization', [Organization])
|
||||
_children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = (
|
||||
'phone_number', [PhoneNumber])
|
||||
_children['{%s}nickname' % CONTACTS_NAMESPACE] = ('nickname', Nickname)
|
||||
_children['{%s}occupation' % CONTACTS_NAMESPACE] = ('occupation', Occupation)
|
||||
_children['{%s}gender' % CONTACTS_NAMESPACE] = ('gender', Gender)
|
||||
_children['{%s}birthday' % CONTACTS_NAMESPACE] = ('birthday', Birthday)
|
||||
_children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address',
|
||||
[PostalAddress])
|
||||
_children['{%s}structuredPostalAddress' % gdata.GDATA_NAMESPACE] = (
|
||||
'structured_postal_address', [StructuredPostalAddress])
|
||||
_children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email])
|
||||
_children['{%s}im' % gdata.GDATA_NAMESPACE] = ('im', [IM])
|
||||
_children['{%s}relation' % CONTACTS_NAMESPACE] = ('relation', [Relation])
|
||||
_children['{%s}userDefinedField' % CONTACTS_NAMESPACE] = (
|
||||
'user_defined_field', [UserDefinedField])
|
||||
_children['{%s}website' % CONTACTS_NAMESPACE] = ('website', [Website])
|
||||
_children['{%s}externalId' % CONTACTS_NAMESPACE] = (
|
||||
'external_id', [ExternalId])
|
||||
_children['{%s}event' % CONTACTS_NAMESPACE] = ('event', [Event])
|
||||
# The following line should be removed once the Python support
|
||||
# for GData 2.0 is mature.
|
||||
_attributes = gdata.BatchEntry._attributes.copy()
|
||||
_attributes['{%s}etag' % gdata.GDATA_NAMESPACE] = 'etag'
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None, organization=None, phone_number=None,
|
||||
nickname=None, occupation=None, gender=None, birthday=None,
|
||||
postal_address=None, structured_postal_address=None, email=None,
|
||||
im=None, relation=None, user_defined_field=None, website=None,
|
||||
external_id=None, event=None, batch_operation=None,
|
||||
batch_id=None, batch_status=None, text=None,
|
||||
extension_elements=None, extension_attributes=None, etag=None):
|
||||
gdata.BatchEntry.__init__(self, author=author, category=category,
|
||||
content=content, atom_id=atom_id, link=link,
|
||||
published=published,
|
||||
batch_operation=batch_operation,
|
||||
batch_id=batch_id, batch_status=batch_status,
|
||||
title=title, updated=updated)
|
||||
self.organization = organization or []
|
||||
self.phone_number = phone_number or []
|
||||
self.nickname = nickname
|
||||
self.occupation = occupation
|
||||
self.gender = gender
|
||||
self.birthday = birthday
|
||||
self.postal_address = postal_address or []
|
||||
self.structured_postal_address = structured_postal_address or []
|
||||
self.email = email or []
|
||||
self.im = im or []
|
||||
self.relation = relation or []
|
||||
self.user_defined_field = user_defined_field or []
|
||||
self.website = website or []
|
||||
self.external_id = external_id or []
|
||||
self.event = event or []
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
# The following line should be removed once the Python support
|
||||
# for GData 2.0 is mature.
|
||||
self.etag = etag
|
||||
|
||||
|
||||
class ContactEntry(PersonEntry):
|
||||
"""A Google Contact flavor of an Atom Entry."""
|
||||
|
||||
_children = PersonEntry._children.copy()
|
||||
|
||||
_children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted)
|
||||
_children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = (
|
||||
'group_membership_info', [GroupMembershipInfo])
|
||||
_children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
|
||||
'extended_property', [gdata.ExtendedProperty])
|
||||
# Overwrite the organization rule in PersonEntry so that a ContactEntry
|
||||
# may only contain one <gd:organization> element.
|
||||
_children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
|
||||
'organization', Organization)
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None,
|
||||
title=None, updated=None, organization=None, phone_number=None,
|
||||
nickname=None, occupation=None, gender=None, birthday=None,
|
||||
postal_address=None, structured_postal_address=None, email=None,
|
||||
im=None, relation=None, user_defined_field=None, website=None,
|
||||
external_id=None, event=None, batch_operation=None,
|
||||
batch_id=None, batch_status=None, text=None,
|
||||
extension_elements=None, extension_attributes=None, etag=None,
|
||||
deleted=None, extended_property=None,
|
||||
group_membership_info=None):
|
||||
PersonEntry.__init__(self, author=author, category=category,
|
||||
content=content, atom_id=atom_id, link=link,
|
||||
published=published, title=title, updated=updated,
|
||||
organization=organization, phone_number=phone_number,
|
||||
nickname=nickname, occupation=occupation,
|
||||
gender=gender, birthday=birthday,
|
||||
postal_address=postal_address,
|
||||
structured_postal_address=structured_postal_address,
|
||||
email=email, im=im, relation=relation,
|
||||
user_defined_field=user_defined_field,
|
||||
website=website, external_id=external_id, event=event,
|
||||
batch_operation=batch_operation, batch_id=batch_id,
|
||||
batch_status=batch_status, text=text,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, etag=etag)
|
||||
self.deleted = deleted
|
||||
self.extended_property = extended_property or []
|
||||
self.group_membership_info = group_membership_info or []
|
||||
|
||||
def GetPhotoLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == PHOTO_LINK_REL:
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetPhotoEditLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == PHOTO_EDIT_LINK_REL:
|
||||
return a_link
|
||||
return None
|
||||
|
||||
|
||||
def ContactEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(ContactEntry, xml_string)
|
||||
|
||||
|
||||
class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder):
|
||||
"""A Google Contacts feed flavor of an Atom Feed."""
|
||||
|
||||
_children = gdata.BatchFeed._children.copy()
|
||||
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ContactEntry])
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None,
|
||||
entry=None, total_results=None, start_index=None,
|
||||
items_per_page=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
gdata.BatchFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results,
|
||||
start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def ContactsFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(ContactsFeed, xml_string)
|
||||
|
||||
|
||||
class GroupEntry(gdata.BatchEntry):
|
||||
"""Represents a contact group."""
|
||||
_children = gdata.BatchEntry._children.copy()
|
||||
_children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
|
||||
'extended_property', [gdata.ExtendedProperty])
|
||||
|
||||
def __init__(self, author=None, category=None, content=None,
|
||||
contributor=None, atom_id=None, link=None, published=None,
|
||||
rights=None, source=None, summary=None, control=None,
|
||||
title=None, updated=None,
|
||||
extended_property=None, batch_operation=None, batch_id=None,
|
||||
batch_status=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
gdata.BatchEntry.__init__(self, author=author, category=category,
|
||||
content=content,
|
||||
atom_id=atom_id, link=link, published=published,
|
||||
batch_operation=batch_operation,
|
||||
batch_id=batch_id, batch_status=batch_status,
|
||||
title=title, updated=updated)
|
||||
self.extended_property = extended_property or []
|
||||
|
||||
|
||||
def GroupEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GroupEntry, xml_string)
|
||||
|
||||
|
||||
class GroupsFeed(gdata.BatchFeed):
|
||||
"""A Google contact groups feed flavor of an Atom Feed."""
|
||||
_children = gdata.BatchFeed._children.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry])
|
||||
|
||||
|
||||
def GroupsFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(GroupsFeed, xml_string)
|
||||
|
||||
|
||||
class ProfileEntry(PersonEntry):
|
||||
"""A Google Profiles flavor of an Atom Entry."""
|
||||
|
||||
|
||||
def ProfileEntryFromString(xml_string):
|
||||
"""Converts an XML string into a ProfileEntry object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Profile entry.
|
||||
|
||||
Returns:
|
||||
A ProfileEntry object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(ProfileEntry, xml_string)
|
||||
|
||||
|
||||
class ProfilesFeed(gdata.BatchFeed, gdata.LinkFinder):
|
||||
"""A Google Profiles feed flavor of an Atom Feed."""
|
||||
|
||||
_children = gdata.BatchFeed._children.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry])
|
||||
|
||||
def __init__(self, author=None, category=None, contributor=None,
|
||||
generator=None, icon=None, atom_id=None, link=None, logo=None,
|
||||
rights=None, subtitle=None, title=None, updated=None,
|
||||
entry=None, total_results=None, start_index=None,
|
||||
items_per_page=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
gdata.BatchFeed.__init__(self, author=author, category=category,
|
||||
contributor=contributor, generator=generator,
|
||||
icon=icon, atom_id=atom_id, link=link,
|
||||
logo=logo, rights=rights, subtitle=subtitle,
|
||||
title=title, updated=updated, entry=entry,
|
||||
total_results=total_results,
|
||||
start_index=start_index,
|
||||
items_per_page=items_per_page,
|
||||
extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
|
||||
def ProfilesFeedFromString(xml_string):
|
||||
"""Converts an XML string into a ProfilesFeed object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Profiles feed.
|
||||
|
||||
Returns:
|
||||
A ProfilesFeed object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(ProfilesFeed, xml_string)
|
||||
495
python/gdata/contacts/client.py
Normal file
@@ -0,0 +1,495 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Contains a client to communicate with the Contacts servers.

For documentation on the Contacts API, see:
http://code.google.com/apis/contacts/
"""

from types import ListType, DictionaryType
|
||||
|
||||
__author__ = 'vinces1979@gmail.com (Vince Spicer)'
|
||||
|
||||
|
||||
import gdata.client
|
||||
import gdata.contacts.data
|
||||
import atom.data
|
||||
import atom.http_core
|
||||
import gdata.gauth
|
||||
|
||||
|
||||
class ContactsClient(gdata.client.GDClient):
|
||||
api_version = '3'
|
||||
auth_service = 'cp'
|
||||
server = "www.google.com"
|
||||
contact_list = "default"
|
||||
auth_scopes = gdata.gauth.AUTH_SCOPES['cp']
|
||||
|
||||
|
||||
def __init__(self, domain=None, auth_token=None, **kwargs):
|
||||
"""Constructs a new client for the Email Settings API.
|
||||
|
||||
Args:
|
||||
domain: string The Google Apps domain (if any).
|
||||
kwargs: The other parameters to pass to the gdata.client.GDClient
|
||||
constructor.
|
||||
"""
|
||||
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
|
||||
self.domain = domain
|
||||
|
||||
def get_feed_uri(self, kind='contacts', contact_list=None, projection='full',
|
||||
scheme="http"):
|
||||
"""Builds a feed URI.
|
||||
|
||||
Args:
|
||||
kind: The type of feed to return, typically 'groups' or 'contacts'.
|
||||
Default value: 'contacts'.
|
||||
contact_list: The contact list to return a feed for.
|
||||
Default value: self.contact_list.
|
||||
projection: The projection to apply to the feed contents, for example
|
||||
'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
|
||||
scheme: The URL scheme such as 'http' or 'https', None to return a
|
||||
relative URI without hostname.
|
||||
|
||||
Returns:
|
||||
A feed URI using the given kind, contact list, and projection.
|
||||
Example: '/m8/feeds/contacts/default/full'.
|
||||
"""
|
||||
contact_list = contact_list or self.contact_list
|
||||
if kind == 'profiles':
|
||||
contact_list = 'domain/%s' % self.domain
|
||||
prefix = scheme and '%s://%s' % (scheme, self.server) or ''
|
||||
return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)
|
||||
|
||||
GetFeedUri = get_feed_uri
|
||||
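# Usage sketch (illustrative only): the URI shapes get_feed_uri produces
# with the class defaults above (server 'www.google.com', contact_list
# 'default').
#
#   client = ContactsClient()
#   client.get_feed_uri()
#   # -> 'http://www.google.com/m8/feeds/contacts/default/full'
#   client.get_feed_uri(kind='groups', scheme=None)
#   # -> '/m8/feeds/groups/default/full'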
|
||||
def get_contact(self, uri, desired_class=gdata.contacts.data.ContactEntry,
|
||||
auth_token=None, **kwargs):
|
||||
return self.get_feed(uri, auth_token=auth_token,
|
||||
desired_class=desired_class, **kwargs)
|
||||
|
||||
|
||||
GetContact = get_contact
|
||||
|
||||
|
||||
def create_contact(self, new_contact, insert_uri=None, auth_token=None, **kwargs):
|
||||
"""Adds an new contact to Google Contacts.
|
||||
|
||||
Args:
|
||||
new_contact: atom.Entry or subclass A new contact which is to be added to
|
||||
Google Contacts.
|
||||
insert_uri: the URL to post new contacts to the feed
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful insert, an entry containing the contact created
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
insert_uri = insert_uri or self.GetFeedUri()
|
||||
return self.Post(new_contact, insert_uri,
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
CreateContact = create_contact
|
||||
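# Usage sketch (illustrative only), assuming an already authorized
# ContactsClient named `client` and the standard gdata.data name/email
# classes:
#
#   import gdata.data
#   new_contact = gdata.contacts.data.ContactEntry()
#   new_contact.name = gdata.data.Name(
#       full_name=gdata.data.FullName(text='Jane Doe'))
#   new_contact.email.append(gdata.data.Email(
#       address='jane.doe@example.com', primary='true',
#       rel='http://schemas.google.com/g/2005#work'))
#   created = client.create_contact(new_contact)   # POSTs to GetFeedUri()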
|
||||
def add_contact(self, new_contact, insert_uri=None, auth_token=None,
|
||||
billing_information=None, birthday=None, calendar_link=None, **kwargs):
|
||||
"""Adds an new contact to Google Contacts.
|
||||
|
||||
Args:
|
||||
new_contact: atom.Entry or subclass A new contact which is to be added to
|
||||
Google Contacts.
|
||||
insert_uri: the URL to post new contacts to the feed
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful insert, an entry containing the contact created
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
|
||||
contact = gdata.contacts.data.ContactEntry()
|
||||
|
||||
if billing_information is not None:
|
||||
if not isinstance(billing_information, gdata.contacts.data.BillingInformation):
|
||||
billing_information = gdata.contacts.data.BillingInformation(text=billing_information)
|
||||
|
||||
contact.billing_information = billing_information
|
||||
|
||||
if birthday is not None:
|
||||
if not isinstance(birthday, gdata.contacts.data.Birthday):
|
||||
birthday = gdata.contacts.data.Birthday(when=birthday)
|
||||
|
||||
contact.birthday = birthday
|
||||
|
||||
if calendar_link is not None:
|
||||
if type(calendar_link) is not ListType:
|
||||
calendar_link = [calendar_link]
|
||||
|
||||
for link in calendar_link:
|
||||
if not isinstance(link, gdata.contacts.data.CalendarLink):
|
||||
if type(link) is not DictionaryType:
|
||||
raise TypeError, "calendar_link Requires dictionary not %s" % type(link)
|
||||
|
||||
link = gdata.contacts.data.CalendarLink(
|
||||
rel=link.get("rel", None),
|
||||
label=link.get("label", None),
|
||||
primary=link.get("primary", None),
|
||||
href=link.get("href", None),
|
||||
)
|
||||
|
||||
contact.calendar_link.append(link)
|
||||
|
||||
insert_uri = insert_uri or self.GetFeedUri()
|
||||
return self.Post(contact, insert_uri,
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
AddContact = add_contact
|
||||
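# Usage sketch (illustrative only): add_contact wraps plain values itself,
# so a string becomes a Birthday and a dict becomes a CalendarLink. The
# first positional argument is accepted but unused by this method; the
# values shown are placeholders.
#
#   created = client.add_contact(
#       None,
#       birthday='1980-07-04',
#       calendar_link={'rel': 'work',
#                      'href': 'http://www.google.com/calendar/feeds/...'})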
|
||||
def get_contacts(self, desired_class=gdata.contacts.data.ContactsFeed,
|
||||
auth_token=None, **kwargs):
|
||||
"""Obtains a feed with the contacts belonging to the current user.
|
||||
|
||||
Args:
|
||||
auth_token: An object which sets the Authorization HTTP header in its
|
||||
modify_request method. Recommended classes include
|
||||
gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
|
||||
among others. Represents the current user. Defaults to None
|
||||
and if None, this method will look for a value in the
|
||||
auth_token member of ContactsClient.
|
||||
desired_class: class descended from atom.core.XmlElement to which a
|
||||
successful response should be converted. If there is no
|
||||
converter function specified (desired_class=None) then the
|
||||
desired_class will be used in calling the
|
||||
atom.core.parse function. If neither
|
||||
the desired_class nor the converter is specified, an
|
||||
HTTP response object will be returned. Defaults to
|
||||
gdata.contacts.data.ContactsFeed.
|
||||
"""
|
||||
return self.get_feed(self.GetFeedUri(), auth_token=auth_token,
|
||||
desired_class=desired_class, **kwargs)
|
||||
|
||||
GetContacts = get_contacts
|
||||
|
||||
def get_group(self, uri=None, desired_class=gdata.contacts.data.GroupEntry,
|
||||
auth_token=None, **kwargs):
|
||||
""" Get a single groups details
|
||||
Args:
|
||||
uri: the group uri or id
|
||||
"""
|
||||
return self.get(uri, desired_class=desired_class, auth_token=auth_token, **kwargs)
|
||||
|
||||
GetGroup = get_group
|
||||
|
||||
def get_groups(self, uri=None, desired_class=gdata.contacts.data.GroupsFeed,
|
||||
auth_token=None, **kwargs):
|
||||
uri = uri or self.GetFeedUri('groups')
|
||||
return self.get_feed(uri, desired_class=desired_class, auth_token=auth_token, **kwargs)
|
||||
|
||||
GetGroups = get_groups
|
||||
|
||||
def create_group(self, new_group, insert_uri=None, url_params=None,
|
||||
desired_class=None):
|
||||
insert_uri = insert_uri or self.GetFeedUri('groups')
|
||||
return self.Post(new_group, insert_uri, url_params=url_params,
|
||||
desired_class=desired_class)
|
||||
|
||||
CreateGroup = create_group
|
||||
|
||||
def update_group(self, edit_uri, updated_group, url_params=None,
|
||||
escape_params=True, desired_class=None):
|
||||
return self.Put(updated_group, self._CleanUri(edit_uri),
|
||||
url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
desired_class=desired_class)
|
||||
|
||||
UpdateGroup = update_group
|
||||
|
||||
def delete_group(self, group_object, auth_token=None, force=False, **kws):
|
||||
return self.Delete(group_object, auth_token=auth_token, force=force, **kws )
|
||||
|
||||
DeleteGroup = delete_group
|
||||
|
||||
def change_photo(self, media, contact_entry_or_url, content_type=None,
|
||||
content_length=None):
|
||||
"""Change the photo for the contact by uploading a new photo.
|
||||
|
||||
Performs a PUT against the photo edit URL to send the binary data for the
|
||||
photo.
|
||||
|
||||
Args:
|
||||
media: filename, file-like-object, or a gdata.MediaSource object to send.
|
||||
contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
|
||||
method will search for an edit photo link URL and
|
||||
perform a PUT to the URL.
|
||||
content_type: str (optional) the mime type for the photo data. This is
|
||||
necessary if media is a file or file name, but if media
|
||||
is a MediaSource object then the media object can contain
|
||||
the mime type. If media_type is set, it will override the
|
||||
mime type in the media object.
|
||||
content_length: int or str (optional) Specifying the content length is
|
||||
only required if media is a file-like object. If media
|
||||
is a filename, the length is determined using
|
||||
os.path.getsize. If media is a MediaSource object, it is
|
||||
assumed that it already contains the content length.
|
||||
"""
|
||||
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
|
||||
url = contact_entry_or_url.GetPhotoEditLink().href
|
||||
else:
|
||||
url = contact_entry_or_url
|
||||
if isinstance(media, gdata.MediaSource):
|
||||
payload = media
|
||||
# If the media object is a file-like object, then use it as the file
|
||||
# handle in the MediaSource.
|
||||
elif hasattr(media, 'read'):
|
||||
payload = gdata.MediaSource(file_handle=media,
|
||||
content_type=content_type, content_length=content_length)
|
||||
# Assume that the media object is a file name.
|
||||
else:
|
||||
payload = gdata.MediaSource(content_type=content_type,
|
||||
content_length=content_length, file_path=media)
|
||||
return self.Put(payload, url)
|
||||
|
||||
ChangePhoto = change_photo
|
||||
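# Usage sketch (illustrative only): updating a contact's photo from a JPEG
# on disk via the file-name branch above; `entry` stands for a ContactEntry
# fetched earlier that carries a photo edit link.
#
#   client.change_photo('portrait.jpg', entry, content_type='image/jpeg')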
|
||||
def get_photo(self, contact_entry_or_url):
|
||||
"""Retrives the binary data for the contact's profile photo as a string.
|
||||
|
||||
Args:
|
||||
contact_entry_or_url: a gdata.contacts.data.ContactEntry object or a string
|
||||
containing the photo link's URL. If the contact entry does not
|
||||
contain a photo link, the image will not be fetched and this method
|
||||
will return None.
|
||||
"""
|
||||
# TODO: add the ability to write out the binary image data to a file,
|
||||
# reading and writing a chunk at a time to avoid potentially using up
|
||||
# large amounts of memory.
|
||||
url = None
|
||||
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
|
||||
photo_link = contact_entry_or_url.GetPhotoLink()
|
||||
if photo_link:
|
||||
url = photo_link.href
|
||||
else:
|
||||
url = contact_entry_or_url
|
||||
if url:
|
||||
return self.Get(url).read()
|
||||
else:
|
||||
return None
|
||||
|
||||
GetPhoto = get_photo
|
||||
|
||||
def delete_photo(self, contact_entry_or_url):
|
||||
url = None
|
||||
if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry):
|
||||
url = contact_entry_or_url.GetPhotoEditLink().href
|
||||
else:
|
||||
url = contact_entry_or_url
|
||||
if url:
|
||||
self.Delete(url)
|
||||
|
||||
DeletePhoto = delete_photo
|
||||
|
||||
def get_profiles_feed(self, uri=None):
|
||||
"""Retrieves a feed containing all domain's profiles.
|
||||
|
||||
Args:
|
||||
uri: string (optional) the URL to retrieve the profiles feed,
|
||||
for example /m8/feeds/profiles/default/full
|
||||
|
||||
Returns:
|
||||
On success, a ProfilesFeed containing the profiles.
|
||||
On failure, raises a RequestError.
|
||||
"""
|
||||
|
||||
uri = uri or self.GetFeedUri('profiles')
|
||||
return self.Get(uri,
|
||||
desired_class=gdata.contacts.data.ProfilesFeed)
|
||||
|
||||
GetProfilesFeed = get_profiles_feed
|
||||
|
||||
def get_profile(self, uri):
|
||||
"""Retrieves a domain's profile for the user.
|
||||
|
||||
Args:
|
||||
uri: string the URL to retrieve the profiles feed,
|
||||
for example /m8/feeds/profiles/default/full/username
|
||||
|
||||
Returns:
|
||||
On success, a ProfileEntry containing the profile for the user.
|
||||
On failure, raises a RequestError
|
||||
"""
|
||||
return self.Get(uri,
|
||||
desired_class=gdata.contacts.data.ProfileEntry)
|
||||
|
||||
GetProfile = get_profile
|
||||
|
||||
def update_profile(self, updated_profile, auth_token=None, force=False, **kwargs):
|
||||
"""Updates an existing profile.
|
||||
|
||||
Args:
|
||||
updated_profile: atom.Entry or subclass containing
|
||||
the Atom Entry which will replace the profile which is
|
||||
stored at the edit_url.
|
||||
auth_token: An object which sets the Authorization HTTP header in its
|
||||
modify_request method. Recommended classes include
|
||||
gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
|
||||
among others. Represents the current user. Defaults to None
|
||||
and if None, this method will look for a value in the
|
||||
auth_token member of ContactsClient.
|
||||
force: boolean stating whether an update should be forced. Defaults to
|
||||
False. Normally, if a change has been made since the passed in
|
||||
entry was obtained, the server will not overwrite the entry since
|
||||
the changes were based on an obsolete version of the entry.
|
||||
Setting force to True will cause the update to silently
|
||||
overwrite whatever version is present.
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful update, a httplib.HTTPResponse containing the server's
|
||||
response to the PUT request.
|
||||
On failure, raises a RequestError.
|
||||
"""
|
||||
return self.Update(updated_profile, auth_token=auth_token, force=force, **kwargs)
|
||||
|
||||
UpdateProfile = update_profile
|
||||
|
||||
def execute_batch(self, batch_feed, url, desired_class=None):
|
||||
"""Sends a batch request feed to the server.
|
||||
|
||||
Args:
|
||||
batch_feed: gdata.contacts.data.ContactsFeed A feed containing batch
|
||||
request entries. Each entry contains the operation to be performed
|
||||
on the data contained in the entry. For example an entry with an
|
||||
operation type of insert will be used as if the individual entry
|
||||
had been inserted.
|
||||
url: str The batch URL to which these operations should be applied.
|
||||
desired_class: class (optional) The class used to convert the server's
|
||||
response to an object.
|
||||
|
||||
Returns:
|
||||
The results of the batch request's execution on the server. If the
|
||||
default converter is used, this is stored in a ContactsFeed.
|
||||
"""
|
||||
return self.Post(batch_feed, url, desired_class=desired_class)
|
||||
|
||||
ExecuteBatch = execute_batch
|
||||
|
||||
def execute_batch_profiles(self, batch_feed, url,
|
||||
desired_class=gdata.contacts.data.ProfilesFeed):
|
||||
"""Sends a batch request feed to the server.
|
||||
|
||||
Args:
|
||||
batch_feed: gdata.contacts.data.ProfilesFeed A feed containing batch
|
||||
request entries. Each entry contains the operation to be performed
|
||||
on the data contained in the entry. For example an entry with an
|
||||
operation type of insert will be used as if the individual entry
|
||||
had been inserted.
|
||||
url: string The batch URL to which these operations should be applied.
|
||||
desired_class: class (optional) The class used to convert the server's
|
||||
response to an object. The default value is
|
||||
gdata.contacts.data.ProfilesFeed.
|
||||
|
||||
Returns:
|
||||
The results of the batch request's execution on the server. If the
|
||||
default converter is used, this is stored in a ProfilesFeed.
|
||||
"""
|
||||
return self.Post(batch_feed, url, desired_class=desired_class)
|
||||
|
||||
ExecuteBatchProfiles = execute_batch_profiles
|
||||
|
||||
def _CleanUri(self, uri):
|
||||
"""Sanitizes a feed URI.
|
||||
|
||||
Args:
|
||||
uri: The URI to sanitize, can be relative or absolute.
|
||||
|
||||
Returns:
|
||||
The given URI without its http://server prefix, if any.
|
||||
Keeps the leading slash of the URI.
|
||||
"""
|
||||
url_prefix = 'http://%s' % self.server
|
||||
if uri.startswith(url_prefix):
|
||||
uri = uri[len(url_prefix):]
|
||||
return uri
|
||||
|
||||
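# Illustration only: what _CleanUri does with the default server; the
# contact id is a made-up placeholder.
#
#   client._CleanUri('http://www.google.com/m8/feeds/contacts/default/full/123')
#   # -> '/m8/feeds/contacts/default/full/123'
#   client._CleanUri('/m8/feeds/contacts/default/full/123')
#   # -> returned unchanged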
class ContactsQuery(gdata.client.Query):
|
||||
"""
|
||||
Create a custom Contacts Query
|
||||
|
||||
Full specs can be found at: U{Contacts query parameters reference
|
||||
<http://code.google.com/apis/contacts/docs/3.0/reference.html#Parameters>}
|
||||
"""
|
||||
|
||||
def __init__(self, feed=None, group=None, orderby=None, showdeleted=None,
|
||||
sortorder=None, requirealldeleted=None, **kwargs):
|
||||
"""
|
||||
@param max_results: The maximum number of entries to return. If you want
|
||||
to receive all of the contacts, rather than only the default maximum, you
|
||||
can specify a very large number for max-results.
|
||||
@param start-index: The 1-based index of the first result to be retrieved.
|
||||
@param updated-min: The lower bound on entry update dates.
|
||||
@param group: Constrains the results to only the contacts belonging to the
|
||||
group specified. Value of this parameter specifies group ID
|
||||
@param orderby: Sorting criterion. The only supported value is
|
||||
lastmodified.
|
||||
@param showdeleted: Include deleted contacts in the returned contacts feed
|
||||
@param sortorder: Sorting order direction. Can be either ascending or
|
||||
descending.
|
||||
@param requirealldeleted: Only relevant if showdeleted and updated-min
|
||||
are also provided. It dictates the behavior of the server in case it
|
||||
detects that placeholders of some entries deleted since the point in
|
||||
time specified as updated-min may have been lost.
|
||||
"""
|
||||
gdata.client.Query.__init__(self, **kwargs)
|
||||
self.group = group
|
||||
self.orderby = orderby
|
||||
self.sortorder = sortorder
|
||||
self.showdeleted = showdeleted
|
||||
|
||||
def modify_request(self, http_request):
|
||||
if self.group:
|
||||
gdata.client._add_query_param('group', self.group, http_request)
|
||||
if self.orderby:
|
||||
gdata.client._add_query_param('orderby', self.orderby, http_request)
|
||||
if self.sortorder:
|
||||
gdata.client._add_query_param('sortorder', self.sortorder, http_request)
|
||||
if self.showdeleted:
|
||||
gdata.client._add_query_param('showdeleted', self.showdeleted, http_request)
|
||||
gdata.client.Query.modify_request(self, http_request)
|
||||
|
||||
ModifyRequest = modify_request
|
||||
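# Usage sketch (illustrative only): a typical ContactsQuery. The group id
# URL is a placeholder, and passing the query to get_contacts through the
# `q` keyword follows the published Contacts v3 samples rather than anything
# defined in this module.
#
#   query = ContactsQuery(
#       group='http://www.google.com/m8/feeds/groups/default/base/6',
#       orderby='lastmodified', sortorder='descending', showdeleted='true')
#   feed = client.get_contacts(q=query)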
|
||||
|
||||
class ProfilesQuery(gdata.client.Query):
|
||||
def __init__(self, feed=None):
|
||||
self.feed = feed or 'http://www.google.com/m8/feeds/profiles/default/full'
|
||||
|
||||
|
||||
|
||||
474
python/gdata/contacts/data.py
Normal file
@@ -0,0 +1,474 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Data model classes for parsing and generating XML for the Contacts API."""
|
||||
|
||||
|
||||
__author__ = 'vinces1979@gmail.com (Vince Spicer)'
|
||||
|
||||
|
||||
import atom.core
|
||||
import gdata
|
||||
import gdata.data
|
||||
|
||||
|
||||
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
|
||||
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'
|
||||
|
||||
EXTERNAL_ID_ORGANIZATION = 'organization'
|
||||
|
||||
RELATION_MANAGER = 'manager'
|
||||
|
||||
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
|
||||
CONTACTS_TEMPLATE = '{%s}%%s' % CONTACTS_NAMESPACE
|
||||
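# For orientation: the template above expands into the Clark-notation
# qualified names used by the _qname declarations below, e.g.
#
#   CONTACTS_TEMPLATE % 'birthday'
#   # -> '{http://schemas.google.com/contact/2008}birthday'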
|
||||
|
||||
class BillingInformation(atom.core.XmlElement):
|
||||
"""
|
||||
gContact:billingInformation
|
||||
Specifies billing information of the entity represented by the contact. The element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'billingInformation'
|
||||
|
||||
|
||||
class Birthday(atom.core.XmlElement):
|
||||
"""
|
||||
Stores birthday date of the person represented by the contact. The element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'birthday'
|
||||
when = 'when'
|
||||
|
||||
|
||||
class CalendarLink(atom.core.XmlElement):
|
||||
"""
|
||||
Storage for URL of the contact's calendar. The element can be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'calendarLink'
|
||||
rel = 'rel'
|
||||
label = 'label'
|
||||
primary = 'primary'
|
||||
href = 'href'
|
||||
|
||||
|
||||
class DirectoryServer(atom.core.XmlElement):
|
||||
"""
|
||||
A directory server associated with this contact.
|
||||
May not be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'directoryServer'
|
||||
|
||||
|
||||
class Event(atom.core.XmlElement):
|
||||
"""
|
||||
These elements describe events associated with a contact.
|
||||
They may be repeated
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'event'
|
||||
label = 'label'
|
||||
rel = 'rel'
|
||||
when = gdata.data.When
|
||||
|
||||
|
||||
class ExternalId(atom.core.XmlElement):
|
||||
"""
|
||||
Describes an ID of the contact in an external system of some kind.
|
||||
This element may be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'externalId'
|
||||
|
||||
|
||||
def ExternalIdFromString(xml_string):
|
||||
return atom.core.parse(ExternalId, xml_string)
|
||||
|
||||
|
||||
class Gender(atom.core.XmlElement):
|
||||
"""
|
||||
Specifies the gender of the person represented by the contact.
|
||||
The element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'gender'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class Hobby(atom.core.XmlElement):
|
||||
"""
|
||||
Describes a hobby or activity of the person represented by the contact.
|
||||
This element may be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'hobby'
|
||||
|
||||
|
||||
class Initials(atom.core.XmlElement):
|
||||
""" Specifies the initials of the person represented by the contact. The
|
||||
element cannot be repeated. """
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'initials'
|
||||
|
||||
|
||||
class Jot(atom.core.XmlElement):
|
||||
"""
|
||||
Storage for arbitrary pieces of information about the contact. Each jot
|
||||
has a type specified by the rel attribute and a text value.
|
||||
The element can be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'jot'
|
||||
rel = 'rel'
|
||||
|
||||
|
||||
class Language(atom.core.XmlElement):
|
||||
"""
|
||||
Specifies the preferred languages of the contact.
|
||||
The element can be repeated.
|
||||
|
||||
The language must be specified using one of two mutually exclusive methods:
|
||||
using the freeform @label attribute, or using the @code attribute, whose value
|
||||
must conform to the IETF BCP 47 specification.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'language'
|
||||
code = 'code'
|
||||
label = 'label'
|
||||
|
||||
|
||||
class MaidenName(atom.core.XmlElement):
|
||||
"""
|
||||
Specifies maiden name of the person represented by the contact.
|
||||
The element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'maidenName'
|
||||
|
||||
|
||||
class Mileage(atom.core.XmlElement):
|
||||
"""
|
||||
Specifies the mileage for the entity represented by the contact.
|
||||
Can be used for example to document distance needed for reimbursement
|
||||
purposes. The value is not interpreted. The element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'mileage'
|
||||
|
||||
|
||||
class NickName(atom.core.XmlElement):
|
||||
"""
|
||||
Specifies the nickname of the person represented by the contact.
|
||||
The element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'nickname'
|
||||
|
||||
|
||||
class Occupation(atom.core.XmlElement):
|
||||
"""
|
||||
Specifies the occupation/profession of the person specified by the contact.
|
||||
The element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'occupation'
|
||||
|
||||
|
||||
class Priority(atom.core.XmlElement):
|
||||
"""
|
||||
Classifies importance of the contact into 3 categories:
|
||||
* Low
|
||||
* Normal
|
||||
* High
|
||||
|
||||
The priority element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'priority'
|
||||
|
||||
|
||||
class Relation(atom.core.XmlElement):
|
||||
"""
|
||||
This element describes another entity (usually a person) that is in a
|
||||
relation of some kind with the contact.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'relation'
|
||||
rel = 'rel'
|
||||
label = 'label'
|
||||
|
||||
|
||||
class Sensitivity(atom.core.XmlElement):
|
||||
"""
|
||||
Classifies sensitivity of the contact into the following categories:
|
||||
* Confidential
|
||||
* Normal
|
||||
* Personal
|
||||
* Private
|
||||
|
||||
The sensitivity element cannot be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'sensitivity'
|
||||
rel = 'rel'
|
||||
|
||||
|
||||
class UserDefinedField(atom.core.XmlElement):
|
||||
"""
|
||||
Represents an arbitrary key-value pair attached to the contact.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'userDefinedField'
|
||||
key = 'key'
|
||||
value = 'value'
|
||||
|
||||
|
||||
def UserDefinedFieldFromString(xml_string):
|
||||
return atom.core.parse(UserDefinedField, xml_string)
|
||||
|
||||
|
||||
class Website(atom.core.XmlElement):
|
||||
"""
|
||||
Describes websites associated with the contact, including links.
|
||||
May be repeated.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'website'
|
||||
|
||||
href = 'href'
|
||||
label = 'label'
|
||||
primary = 'primary'
|
||||
rel = 'rel'
|
||||
|
||||
|
||||
def WebsiteFromString(xml_string):
|
||||
return atom.core.parse(Website, xml_string)
|
||||
|
||||
|
||||
class HouseName(atom.core.XmlElement):
|
||||
"""
|
||||
Used in places where houses or buildings have names (and
|
||||
not necessarily numbers), e.g. "The Pillars".
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'housename'
|
||||
|
||||
|
||||
class Street(atom.core.XmlElement):
|
||||
"""
|
||||
Can be street, avenue, road, etc. This element also includes the house
|
||||
number and room/apartment/flat/floor number.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'street'
|
||||
|
||||
|
||||
class POBox(atom.core.XmlElement):
|
||||
"""
|
||||
Covers actual P.O. boxes, drawers, locked bags, etc. This is usually but not
|
||||
always mutually exclusive with street
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'pobox'
|
||||
|
||||
|
||||
class Neighborhood(atom.core.XmlElement):
|
||||
"""
|
||||
This is used to disambiguate a street address when a city contains more than
|
||||
one street with the same name, or to specify a small place whose mail is
|
||||
routed through a larger postal town. In China it could be a county or a
|
||||
minor city.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'neighborhood'
|
||||
|
||||
|
||||
class City(atom.core.XmlElement):
|
||||
"""
|
||||
Can be city, village, town, borough, etc. This is the postal town and not
|
||||
necessarily the place of residence or place of business.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'city'
|
||||
|
||||
|
||||
class SubRegion(atom.core.XmlElement):
|
||||
"""
|
||||
Handles administrative districts such as U.S. or U.K. counties that are not
|
||||
used for mail addressing purposes. Subregion is not intended for
|
||||
delivery addresses.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'subregion'
|
||||
|
||||
|
||||
class Region(atom.core.XmlElement):
|
||||
"""
|
||||
A state, province, county (in Ireland), Land (in Germany),
|
||||
departement (in France), etc.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'region'
|
||||
|
||||
|
||||
class PostalCode(atom.core.XmlElement):
|
||||
"""
|
||||
Postal code. Usually country-wide, but sometimes specific to the
|
||||
city (e.g. "2" in "Dublin 2, Ireland" addresses).
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'postcode'
|
||||
|
||||
|
||||
class Country(atom.core.XmlElement):
|
||||
""" The name or code of the country. """
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'country'
|
||||
|
||||
|
||||
class PersonEntry(gdata.data.BatchEntry):
|
||||
"""Represents a google contact"""
|
||||
|
||||
billing_information = BillingInformation
|
||||
birthday = Birthday
|
||||
calendar_link = [CalendarLink]
|
||||
directory_server = DirectoryServer
|
||||
event = [Event]
|
||||
external_id = [ExternalId]
|
||||
gender = Gender
|
||||
hobby = [Hobby]
|
||||
initials = Initials
|
||||
jot = [Jot]
|
||||
language = [Language]
|
||||
maiden_name = MaidenName
|
||||
mileage = Mileage
|
||||
nickname = NickName
|
||||
occupation = Occupation
|
||||
priority = Priority
|
||||
relation = [Relation]
|
||||
sensitivity = Sensitivity
|
||||
user_defined_field = [UserDefinedField]
|
||||
website = [Website]
|
||||
|
||||
name = gdata.data.Name
|
||||
phone_number = [gdata.data.PhoneNumber]
|
||||
organization = gdata.data.Organization
|
||||
postal_address = [gdata.data.PostalAddress]
|
||||
email = [gdata.data.Email]
|
||||
im = [gdata.data.Im]
|
||||
structured_postal_address = [gdata.data.StructuredPostalAddress]
|
||||
extended_property = [gdata.data.ExtendedProperty]
|
||||
|
||||
|
||||
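# For orientation: in the atom.core declarative style used above, a bare
# class declares a single child element and a one-element list declares a
# repeatable child, so on a parsed entry:
#
#   person.birthday   # one Birthday instance or None   (scalar declaration)
#   person.website    # list of Website instances       ([Website] declaration)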
class Deleted(atom.core.XmlElement):
|
||||
"""If present, indicates that this contact has been deleted."""
|
||||
_qname = gdata.GDATA_TEMPLATE % 'deleted'
|
||||
|
||||
|
||||
class GroupMembershipInfo(atom.core.XmlElement):
|
||||
"""
|
||||
Identifies the group to which the contact belongs or belonged.
|
||||
The group is referenced by its id.
|
||||
"""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'groupMembershipInfo'
|
||||
|
||||
href = 'href'
|
||||
deleted = 'deleted'
|
||||
|
||||
|
||||
class ContactEntry(PersonEntry):
|
||||
"""A Google Contacts flavor of an Atom Entry."""
|
||||
|
||||
deleted = Deleted
|
||||
group_membership_info = [GroupMembershipInfo]
|
||||
organization = gdata.data.Organization
|
||||
|
||||
def GetPhotoLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == PHOTO_LINK_REL:
|
||||
return a_link
|
||||
return None
|
||||
|
||||
def GetPhotoEditLink(self):
|
||||
for a_link in self.link:
|
||||
if a_link.rel == PHOTO_EDIT_LINK_REL:
|
||||
return a_link
|
||||
return None
|
||||
|
||||
|
||||
class ContactsFeed(gdata.data.BatchFeed):
|
||||
"""A collection of Contacts."""
|
||||
entry = [ContactEntry]
|
||||
|
||||
|
||||
class SystemGroup(atom.core.XmlElement):
|
||||
"""The contacts systemGroup element.
|
||||
|
||||
When used within a contact group entry, indicates that the group in
|
||||
question is one of the predefined system groups."""
|
||||
|
||||
_qname = CONTACTS_TEMPLATE % 'systemGroup'
|
||||
id = 'id'
|
||||
|
||||
|
||||
class GroupEntry(gdata.data.BatchEntry):
|
||||
"""Represents a contact group."""
|
||||
extended_property = [gdata.data.ExtendedProperty]
|
||||
system_group = SystemGroup
|
||||
|
||||
|
||||
class GroupsFeed(gdata.data.BatchFeed):
|
||||
"""A Google contact groups feed flavor of an Atom Feed."""
|
||||
entry = [GroupEntry]
|
||||
|
||||
|
||||
class ProfileEntry(PersonEntry):
|
||||
"""A Google Profiles flavor of an Atom Entry."""
|
||||
|
||||
|
||||
def ProfileEntryFromString(xml_string):
|
||||
"""Converts an XML string into a ProfileEntry object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Profile entry.
|
||||
|
||||
Returns:
|
||||
A ProfileEntry object corresponding to the given XML.
|
||||
"""
|
||||
return atom.core.parse(ProfileEntry, xml_string)
|
||||
|
||||
|
||||
class ProfilesFeed(gdata.data.BatchFeed):
|
||||
"""A Google Profiles feed flavor of an Atom Feed."""
|
||||
_qname = atom.data.ATOM_TEMPLATE % 'feed'
|
||||
entry = [ProfileEntry]
|
||||
|
||||
|
||||
def ProfilesFeedFromString(xml_string):
|
||||
"""Converts an XML string into a ProfilesFeed object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Profiles feed.
|
||||
|
||||
Returns:
|
||||
A ProfilesFeed object corresponding to the given XML.
|
||||
"""
|
||||
return atom.core.parse(ProfilesFeed, xml_string)
|
||||
|
||||
|
||||
427
python/gdata/contacts/service.py
Normal file
@@ -0,0 +1,427 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""ContactsService extends the GDataService for Google Contacts operations.
|
||||
|
||||
ContactsService: Provides methods to query feeds and manipulate items.
|
||||
Extends GDataService.
|
||||
|
||||
DictionaryToParamList: Function which converts a dictionary into a list of
|
||||
URL arguments (represented as strings). This is a
|
||||
utility function used in CRUD operations.
|
||||
"""
|
||||
|
||||
__author__ = 'dbrattli (Dag Brattli)'
|
||||
|
||||
|
||||
import gdata
|
||||
import gdata.calendar
|
||||
import gdata.service
|
||||
|
||||
|
||||
DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full'
|
||||
'/batch')
|
||||
DEFAULT_PROFILES_BATCH_URL = ('http://www.google.com'
|
||||
'/m8/feeds/profiles/default/full/batch')
|
||||
|
||||
GDATA_VER_HEADER = 'GData-Version'
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class RequestError(Error):
|
||||
pass
|
||||
|
||||
|
||||
class ContactsService(gdata.service.GDataService):
|
||||
"""Client for the Google Contacts service."""
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server='www.google.com', additional_headers=None,
|
||||
contact_list='default', **kwargs):
|
||||
"""Creates a client for the Contacts service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'www.google.com'.
|
||||
contact_list: string (optional) The name of the default contact list to
|
||||
use when no URI is specified to the methods of the service.
|
||||
Default value: 'default' (the logged in user's contact list).
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
|
||||
self.contact_list = contact_list
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service='cp', source=source,
|
||||
server=server, additional_headers=additional_headers, **kwargs)
|
||||
|
||||
def GetFeedUri(self, kind='contacts', contact_list=None, projection='full',
|
||||
scheme=None):
|
||||
"""Builds a feed URI.
|
||||
|
||||
Args:
|
||||
kind: The type of feed to return, typically 'groups' or 'contacts'.
|
||||
Default value: 'contacts'.
|
||||
contact_list: The contact list to return a feed for.
|
||||
Default value: self.contact_list.
|
||||
projection: The projection to apply to the feed contents, for example
|
||||
'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
|
||||
scheme: The URL scheme such as 'http' or 'https', None to return a
|
||||
relative URI without hostname.
|
||||
|
||||
Returns:
|
||||
A feed URI using the given kind, contact list, and projection.
|
||||
Example: '/m8/feeds/contacts/default/full'.
|
||||
"""
|
||||
contact_list = contact_list or self.contact_list
|
||||
if kind == 'profiles':
|
||||
contact_list = 'domain/%s' % contact_list
|
||||
prefix = scheme and '%s://%s' % (scheme, self.server) or ''
|
||||
return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)
|
||||
|
||||
def GetContactsFeed(self, uri=None):
|
||||
uri = uri or self.GetFeedUri()
|
||||
return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString)
|
||||
|
||||
def GetContact(self, uri):
|
||||
return self.Get(uri, converter=gdata.contacts.ContactEntryFromString)
|
||||
|
||||
def CreateContact(self, new_contact, insert_uri=None, url_params=None,
|
||||
escape_params=True):
|
||||
"""Adds an new contact to Google Contacts.
|
||||
|
||||
Args:
|
||||
new_contact: atom.Entry or subclass A new contact which is to be added to
|
||||
Google Contacts.
|
||||
insert_uri: the URL to post new contacts to the feed
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the insertion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful insert, an entry containing the contact created
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
insert_uri = insert_uri or self.GetFeedUri()
|
||||
return self.Post(new_contact, insert_uri, url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.contacts.ContactEntryFromString)
|
||||
|
||||
def UpdateContact(self, edit_uri, updated_contact, url_params=None,
|
||||
escape_params=True):
|
||||
"""Updates an existing contact.
|
||||
|
||||
Args:
|
||||
edit_uri: string The edit link URI for the element being updated
|
||||
updated_contact: string, atom.Entry or subclass containing
|
||||
the Atom Entry which will replace the contact which is
|
||||
stored at the edit_url
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the update request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful update, a httplib.HTTPResponse containing the server's
|
||||
response to the PUT request.
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
return self.Put(updated_contact, self._CleanUri(edit_uri),
|
||||
url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.contacts.ContactEntryFromString)
|
||||
|
||||
def DeleteContact(self, edit_uri, extra_headers=None,
|
||||
url_params=None, escape_params=True):
|
||||
"""Removes an contact with the specified ID from Google Contacts.
|
||||
|
||||
Args:
|
||||
edit_uri: string The edit URL of the entry to be deleted. Example:
|
||||
'/m8/feeds/contacts/default/full/xxx/yyy'
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the deletion request.
|
||||
escape_params: boolean (optional) If true, the url_parameters will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful delete, an httplib.HTTPResponse containing the server's
|
||||
response to the DELETE request.
|
||||
On failure, a RequestError is raised of the form:
|
||||
{'status': HTTP status code from server,
|
||||
'reason': HTTP reason from the server,
|
||||
'body': HTTP body of the server's response}
|
||||
"""
|
||||
return self.Delete(self._CleanUri(edit_uri),
|
||||
url_params=url_params, escape_params=escape_params)
|
||||
|
||||
def GetGroupsFeed(self, uri=None):
|
||||
uri = uri or self.GetFeedUri('groups')
|
||||
return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString)
|
||||
|
||||
def CreateGroup(self, new_group, insert_uri=None, url_params=None,
|
||||
escape_params=True):
|
||||
insert_uri = insert_uri or self.GetFeedUri('groups')
|
||||
return self.Post(new_group, insert_uri, url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.contacts.GroupEntryFromString)
|
||||
|
||||
def UpdateGroup(self, edit_uri, updated_group, url_params=None,
|
||||
escape_params=True):
|
||||
return self.Put(updated_group, self._CleanUri(edit_uri),
|
||||
url_params=url_params,
|
||||
escape_params=escape_params,
|
||||
converter=gdata.contacts.GroupEntryFromString)
|
||||
|
||||
def DeleteGroup(self, edit_uri, extra_headers=None,
|
||||
url_params=None, escape_params=True):
|
||||
return self.Delete(self._CleanUri(edit_uri),
|
||||
url_params=url_params, escape_params=escape_params)
|
||||
|
||||
def ChangePhoto(self, media, contact_entry_or_url, content_type=None,
|
||||
content_length=None):
|
||||
"""Change the photo for the contact by uploading a new photo.
|
||||
|
||||
Performs a PUT against the photo edit URL to send the binary data for the
|
||||
photo.
|
||||
|
||||
Args:
|
||||
media: filename, file-like object, or a gdata.MediaSource object to send.
|
||||
contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
|
||||
method will search for an edit photo link URL and
|
||||
perform a PUT to the URL.
|
||||
content_type: str (optional) the mime type for the photo data. This is
|
||||
necessary if media is a file or file name, but if media
|
||||
is a MediaSource object then the media object can contain
|
||||
the mime type. If content_type is set, it will override the
|
||||
mime type in the media object.
|
||||
content_length: int or str (optional) Specifying the content length is
|
||||
only required if media is a file-like object. If media
|
||||
is a filename, the length is determined using
|
||||
os.path.getsize. If media is a MediaSource object, it is
|
||||
assumed that it already contains the content length.
|
||||
"""
|
||||
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
|
||||
url = contact_entry_or_url.GetPhotoEditLink().href
|
||||
else:
|
||||
url = contact_entry_or_url
|
||||
if isinstance(media, gdata.MediaSource):
|
||||
payload = media
|
||||
# If the media object is a file-like object, then use it as the file
|
||||
# handle in the MediaSource.
|
||||
elif hasattr(media, 'read'):
|
||||
payload = gdata.MediaSource(file_handle=media,
|
||||
content_type=content_type, content_length=content_length)
|
||||
# Assume that the media object is a file name.
|
||||
else:
|
||||
payload = gdata.MediaSource(content_type=content_type,
|
||||
content_length=content_length, file_path=media)
|
||||
return self.Put(payload, url)
|
||||
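# Illustrative usage sketch (not part of the original library code): uploading
# a photo from a local file for an existing ContactEntry; the file name and
# mime type are hypothetical.
#
#   client.ChangePhoto('portrait.jpg', contact_entry,
#                      content_type='image/jpeg')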
|
||||
def GetPhoto(self, contact_entry_or_url):
|
||||
"""Retrives the binary data for the contact's profile photo as a string.
|
||||
|
||||
Args:
|
||||
contact_entry_or_url: a gdata.contacts.ContactEntry object or a string
|
||||
containing the photo link's URL. If the contact entry does not
|
||||
contain a photo link, the image will not be fetched and this method
|
||||
will return None.
|
||||
"""
|
||||
# TODO: add the ability to write out the binary image data to a file,
|
||||
# reading and writing a chunk at a time to avoid potentially using up
|
||||
# large amounts of memory.
|
||||
url = None
|
||||
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
|
||||
photo_link = contact_entry_or_url.GetPhotoLink()
|
||||
if photo_link:
|
||||
url = photo_link.href
|
||||
else:
|
||||
url = contact_entry_or_url
|
||||
if url:
|
||||
return self.Get(url, converter=str)
|
||||
else:
|
||||
return None
|
||||
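# Illustrative usage sketch (not part of the original library code): saving a
# contact's profile photo to disk; the output file name is hypothetical.
#
#   photo_bytes = client.GetPhoto(contact_entry)
#   if photo_bytes:
#     open('photo.jpg', 'wb').write(photo_bytes)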
|
||||
def DeletePhoto(self, contact_entry_or_url):
|
||||
url = None
|
||||
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
|
||||
url = contact_entry_or_url.GetPhotoEditLink().href
|
||||
else:
|
||||
url = contact_entry_or_url
|
||||
if url:
|
||||
self.Delete(url)
|
||||
|
||||
def GetProfilesFeed(self, uri=None):
|
||||
"""Retrieves a feed containing all domain's profiles.
|
||||
|
||||
Args:
|
||||
uri: string (optional) the URL to retrieve the profiles feed,
|
||||
for example /m8/feeds/profiles/default/full
|
||||
|
||||
Returns:
|
||||
On success, a ProfilesFeed containing the profiles.
|
||||
On failure, raises a RequestError.
|
||||
"""
|
||||
|
||||
uri = uri or self.GetFeedUri('profiles')
|
||||
return self.Get(uri,
|
||||
converter=gdata.contacts.ProfilesFeedFromString)
|
||||
|
||||
def GetProfile(self, uri):
|
||||
"""Retrieves a domain's profile for the user.
|
||||
|
||||
Args:
|
||||
uri: string the URL to retrieve the profiles feed,
|
||||
for example /m8/feeds/profiles/default/full/username
|
||||
|
||||
Returns:
|
||||
On success, a ProfileEntry containing the profile for the user.
|
||||
On failure, raises a RequestError
|
||||
"""
|
||||
return self.Get(uri,
|
||||
converter=gdata.contacts.ProfileEntryFromString)
|
||||
|
||||
def UpdateProfile(self, edit_uri, updated_profile, url_params=None,
|
||||
escape_params=True):
|
||||
"""Updates an existing profile.
|
||||
|
||||
Args:
|
||||
edit_uri: string The edit link URI for the element being updated
|
||||
updated_profile: string, atom.Entry or subclass containing
|
||||
the Atom Entry which will replace the profile which is
|
||||
stored at the edit_url.
|
||||
url_params: dict (optional) Additional URL parameters to be included
|
||||
in the update request.
|
||||
escape_params: boolean (optional) If true, the url_params will be
|
||||
escaped before they are included in the request.
|
||||
|
||||
Returns:
|
||||
On successful update, an httplib.HTTPResponse containing the server's
|
||||
response to the PUT request.
|
||||
On failure, raises a RequestError.
|
||||
"""
|
||||
return self.Put(updated_profile, self._CleanUri(edit_uri),
|
||||
url_params=url_params, escape_params=escape_params,
|
||||
converter=gdata.contacts.ProfileEntryFromString)
|
||||
|
||||
def ExecuteBatch(self, batch_feed, url,
|
||||
converter=gdata.contacts.ContactsFeedFromString):
|
||||
"""Sends a batch request feed to the server.
|
||||
|
||||
Args:
|
||||
batch_feed: gdata.contacts.ContactsFeed A feed containing batch
|
||||
request entries. Each entry contains the operation to be performed
|
||||
on the data contained in the entry. For example an entry with an
|
||||
operation type of insert will be used as if the individual entry
|
||||
had been inserted.
|
||||
url: str The batch URL to which these operations should be applied.
|
||||
converter: Function (optional) The function used to convert the server's
|
||||
response to an object. The default value is ContactsFeedFromString.
|
||||
|
||||
Returns:
|
||||
The results of the batch request's execution on the server. If the
|
||||
default converter is used, this is stored in a ContactsFeed.
|
||||
"""
|
||||
return self.Post(batch_feed, url, converter=converter)
|
||||
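# Illustrative usage sketch (not part of the original library code):
# `request_feed` is assumed to be a pre-built gdata.contacts.ContactsFeed whose
# entries carry batch operations, and the batch URL shown is the conventional
# one for the full contacts projection.
#
#   result_feed = client.ExecuteBatch(
#       request_feed, '/m8/feeds/contacts/default/full/batch')
#   for entry in result_feed.entry:
#     print entry.batch_status.code   # assumes entries expose batch status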
|
||||
def ExecuteBatchProfiles(self, batch_feed, url,
|
||||
converter=gdata.contacts.ProfilesFeedFromString):
|
||||
"""Sends a batch request feed to the server.
|
||||
|
||||
Args:
|
||||
batch_feed: gdata.contacts.ProfilesFeed A feed containing batch
|
||||
request entries. Each entry contains the operation to be performed
|
||||
on the data contained in the entry. For example an entry with an
|
||||
operation type of insert will be used as if the individual entry
|
||||
had been inserted.
|
||||
url: string The batch URL to which these operations should be applied.
|
||||
converter: Function (optional) The function used to convert the server's
|
||||
response to an object. The default value is
|
||||
gdata.contacts.ProfilesFeedFromString.
|
||||
|
||||
Returns:
|
||||
The results of the batch request's execution on the server. If the
|
||||
default converter is used, this is stored in a ProfilesFeed.
|
||||
"""
|
||||
return self.Post(batch_feed, url, converter=converter)
|
||||
|
||||
def _CleanUri(self, uri):
|
||||
"""Sanitizes a feed URI.
|
||||
|
||||
Args:
|
||||
uri: The URI to sanitize, can be relative or absolute.
|
||||
|
||||
Returns:
|
||||
The given URI without its http://server prefix, if any.
|
||||
Keeps the leading slash of the URI.
|
||||
"""
|
||||
url_prefix = 'http://%s' % self.server
|
||||
if uri.startswith(url_prefix):
|
||||
uri = uri[len(url_prefix):]
|
||||
return uri
|
||||
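# Illustrative examples (not part of the original library code), assuming
# self.server is 'www.google.com':
#
#   self._CleanUri('http://www.google.com/m8/feeds/contacts/default/full/123')
#   # -> '/m8/feeds/contacts/default/full/123'
#   self._CleanUri('/m8/feeds/contacts/default/full/123')
#   # -> unchanged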
|
||||
class ContactsQuery(gdata.service.Query):
|
||||
|
||||
def __init__(self, feed=None, text_query=None, params=None,
|
||||
categories=None, group=None):
|
||||
self.feed = feed or '/m8/feeds/contacts/default/full'
|
||||
if group:
|
||||
self._SetGroup(group)
|
||||
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
|
||||
params=params, categories=categories)
|
||||
|
||||
def _GetGroup(self):
|
||||
if 'group' in self:
|
||||
return self['group']
|
||||
else:
|
||||
return None
|
||||
|
||||
def _SetGroup(self, group_id):
|
||||
self['group'] = group_id
|
||||
|
||||
group = property(_GetGroup, _SetGroup,
|
||||
doc='The group query parameter to find only contacts in this group')
|
||||
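# Illustrative usage sketch (not part of the original library code): limiting a
# contacts query to one group plus a text search; the group id URL is
# hypothetical.
#
#   query = ContactsQuery(text_query='smith',
#       group='http://www.google.com/m8/feeds/groups/default/base/6')
#   query.max_results = '25'
#   feed = client.GetContactsFeed(query.ToUri())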
|
||||
class GroupsQuery(gdata.service.Query):
|
||||
|
||||
def __init__(self, feed=None, text_query=None, params=None,
|
||||
categories=None):
|
||||
self.feed = feed or '/m8/feeds/groups/default/full'
|
||||
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
|
||||
params=params, categories=categories)
|
||||
|
||||
|
||||
class ProfilesQuery(gdata.service.Query):
|
||||
"""Constructs a query object for the profiles feed."""
|
||||
|
||||
def __init__(self, feed=None, text_query=None, params=None,
|
||||
categories=None):
|
||||
self.feed = feed or '/m8/feeds/profiles/default/full'
|
||||
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
|
||||
params=params, categories=categories)
|
||||
279
python/gdata/core.py
Normal file
@@ -0,0 +1,279 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2010 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# This module is used for version 2 of the Google Data APIs.
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
"""Provides classes and methods for working with JSON-C.
|
||||
|
||||
This module is experimental and subject to backwards incompatible changes.
|
||||
|
||||
Jsonc: Class which represents JSON-C data and provides pythonic member
|
||||
access which is a bit cleaner than working with plain old dicts.
|
||||
parse_json: Converts a JSON-C string into a Jsonc object.
|
||||
jsonc_to_string: Converts a Jsonc object into a string of JSON-C.
|
||||
"""
|
||||
|
||||
|
||||
try:
|
||||
import simplejson
|
||||
except ImportError:
|
||||
try:
|
||||
# Try to import from django, should work on App Engine
|
||||
from django.utils import simplejson
|
||||
except ImportError:
|
||||
# Should work for Python2.6 and higher.
|
||||
import json as simplejson
|
||||
|
||||
|
||||
def _convert_to_jsonc(x):
|
||||
"""Builds a Jsonc objects which wraps the argument's members."""
|
||||
|
||||
if isinstance(x, dict):
|
||||
jsonc_obj = Jsonc()
|
||||
# Recursively transform all members of the dict.
|
||||
# When converting a dict, we do not convert _name items into private
|
||||
# Jsonc members.
|
||||
for key, value in x.iteritems():
|
||||
jsonc_obj._dict[key] = _convert_to_jsonc(value)
|
||||
return jsonc_obj
|
||||
elif isinstance(x, list):
|
||||
# Recursively transform all members of the list.
|
||||
members = []
|
||||
for item in x:
|
||||
members.append(_convert_to_jsonc(item))
|
||||
return members
|
||||
else:
|
||||
# Return the base object.
|
||||
return x
|
||||
|
||||
|
||||
def parse_json(json_string):
|
||||
"""Converts a JSON-C string into a Jsonc object.
|
||||
|
||||
Args:
|
||||
json_string: str or unicode The JSON to be parsed.
|
||||
|
||||
Returns:
|
||||
A new Jsonc object.
|
||||
"""
|
||||
|
||||
return _convert_to_jsonc(simplejson.loads(json_string))
|
||||
|
||||
|
||||
def parse_json_file(json_file):
|
||||
return _convert_to_jsonc(simplejson.load(json_file))
|
||||
|
||||
|
||||
def jsonc_to_string(jsonc_obj):
|
||||
"""Converts a Jsonc object into a string of JSON-C."""
|
||||
|
||||
return simplejson.dumps(_convert_to_object(jsonc_obj))
|
||||
|
||||
|
||||
def prettify_jsonc(jsonc_obj, indentation=2):
|
||||
"""Converts a Jsonc object to a pretified (intented) JSON string."""
|
||||
|
||||
return simplejson.dumps(_convert_to_object(jsonc_obj), indent=indentation)
|
||||
|
||||
|
||||
|
||||
def _convert_to_object(jsonc_obj):
|
||||
"""Creates a new dict or list which has the data in the Jsonc object.
|
||||
|
||||
Used to convert the Jsonc object to a plain old Python object to simplify
|
||||
conversion to a JSON-C string.
|
||||
|
||||
Args:
|
||||
jsonc_obj: A Jsonc object to be converted into simple Python objects
|
||||
(dicts, lists, etc.)
|
||||
|
||||
Returns:
|
||||
Either a dict, list, or other object with members converted from Jsonc
|
||||
objects to the corresponding simple Python object.
|
||||
"""
|
||||
|
||||
if isinstance(jsonc_obj, Jsonc):
|
||||
plain = {}
|
||||
for key, value in jsonc_obj._dict.iteritems():
|
||||
plain[key] = _convert_to_object(value)
|
||||
return plain
|
||||
elif isinstance(jsonc_obj, list):
|
||||
plain = []
|
||||
for item in jsonc_obj:
|
||||
plain.append(_convert_to_object(item))
|
||||
return plain
|
||||
else:
|
||||
return jsonc_obj
|
||||
|
||||
|
||||
def _to_jsonc_name(member_name):
|
||||
"""Converts a Python style member name to a JSON-C style name.
|
||||
|
||||
JSON-C uses lowerCamelCase while Python tends to use
|
||||
lower_with_underscores so this method converts as follows:
|
||||
|
||||
spam becomes spam
|
||||
spam_and_eggs becomes spamAndEggs
|
||||
|
||||
Args:
|
||||
member_name: str or unicode The Python style name which should be
|
||||
converted to JSON-C style.
|
||||
|
||||
Returns:
|
||||
The JSON-C style name as a str or unicode.
|
||||
"""
|
||||
|
||||
characters = []
|
||||
uppercase_next = False
|
||||
for character in member_name:
|
||||
if character == '_':
|
||||
uppercase_next = True
|
||||
elif uppercase_next:
|
||||
characters.append(character.upper())
|
||||
uppercase_next = False
|
||||
else:
|
||||
characters.append(character)
|
||||
return ''.join(characters)
|
||||
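# Illustrative examples (not part of the original library code):
#
#   _to_jsonc_name('spam')           # -> 'spam'
#   _to_jsonc_name('total_items')    # -> 'totalItems'
#   _to_jsonc_name('comment_count')  # -> 'commentCount'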
|
||||
|
||||
class Jsonc(object):
|
||||
"""Represents JSON-C data in an easy to access object format.
|
||||
|
||||
To access the members of a JSON structure which looks like this:
|
||||
{
|
||||
"data": {
|
||||
"totalItems": 800,
|
||||
"items": [
|
||||
{
|
||||
"content": {
|
||||
"1": "rtsp://v5.cache3.c.youtube.com/CiILENy.../0/0/0/video.3gp"
|
||||
},
|
||||
"viewCount": 220101,
|
||||
"commentCount": 22,
|
||||
"favoriteCount": 201
|
||||
}
|
||||
]
|
||||
},
|
||||
"apiVersion": "2.0"
|
||||
}
|
||||
|
||||
You would do the following:
|
||||
x = gdata.core.parse_json(the_above_string)
|
||||
# Gives you 800
|
||||
x.data.total_items
|
||||
# Should be 22
|
||||
x.data.items[0].comment_count
|
||||
# The apiVersion is '2.0'
|
||||
x.api_version
|
||||
|
||||
To create a Jsonc object which would produce the above JSON, you would do:
|
||||
gdata.core.Jsonc(
|
||||
api_version='2.0',
|
||||
data=gdata.core.Jsonc(
|
||||
total_items=800,
|
||||
items=[
|
||||
gdata.core.Jsonc(
|
||||
view_count=220101,
|
||||
comment_count=22,
|
||||
favorite_count=201,
|
||||
content={
|
||||
'1': ('rtsp://v5.cache3.c.youtube.com'
|
||||
'/CiILENy.../0/0/0/video.3gp')})]))
|
||||
or
|
||||
x = gdata.core.Jsonc()
|
||||
x.api_version = '2.0'
|
||||
x.data = gdata.core.Jsonc()
|
||||
x.data.total_items = 800
|
||||
x.data.items = []
|
||||
# etc.
|
||||
|
||||
How it works:
|
||||
The JSON-C data is stored in an internal dictionary (._dict) and the
|
||||
getattr, setattr, and delattr methods rewrite the name which you provide
|
||||
to mirror the expected format in JSON-C. (For more details on name
|
||||
conversion see _to_jsonc_name.) You may also access members using
|
||||
getitem, setitem, delitem as you would for a dictionary. For example
|
||||
x.data.total_items is equivalent to x['data']['totalItems']
|
||||
(Not all dict methods are supported so if you need something other than
|
||||
the item operations, then you will want to use the ._dict member).
|
||||
|
||||
You may need to use getitem or the _dict member to access certain
|
||||
properties in cases where the JSON-C syntax does not map neatly to Python
|
||||
objects. For example the YouTube Video feed has some JSON like this:
|
||||
"content": {"1": "rtsp://v5.cache3.c.youtube.com..."...}
|
||||
You cannot do x.content.1 in Python, so you would use the getitem as
|
||||
follows:
|
||||
x.content['1']
|
||||
or you could use the _dict member as follows:
|
||||
x.content._dict['1']
|
||||
|
||||
If you need to create a new object with such a mapping, you could use:
|
||||
|
||||
x.content = gdata.core.Jsonc(_dict={'1': 'rtsp://cache3.c.youtube.com...'})
|
||||
"""
|
||||
|
||||
def __init__(self, _dict=None, **kwargs):
|
||||
json = _dict or {}
|
||||
for key, value in kwargs.iteritems():
|
||||
if key.startswith('_'):
|
||||
object.__setattr__(self, key, value)
|
||||
else:
|
||||
json[_to_jsonc_name(key)] = _convert_to_jsonc(value)
|
||||
|
||||
object.__setattr__(self, '_dict', json)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if name.startswith('_'):
|
||||
object.__setattr__(self, name, value)
|
||||
else:
|
||||
object.__getattribute__(
|
||||
self, '_dict')[_to_jsonc_name(name)] = _convert_to_jsonc(value)
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name.startswith('_'):
|
||||
return object.__getattribute__(self, name)
|
||||
else:
|
||||
try:
|
||||
return object.__getattribute__(self, '_dict')[_to_jsonc_name(name)]
|
||||
except KeyError:
|
||||
raise AttributeError(
|
||||
'No member for %s or [\'%s\']' % (name, _to_jsonc_name(name)))
|
||||
|
||||
|
||||
def __delattr__(self, name):
|
||||
if name.startswith('_'):
|
||||
object.__delattr__(self, name)
|
||||
else:
|
||||
try:
|
||||
del object.__getattribute__(self, '_dict')[_to_jsonc_name(name)]
|
||||
except KeyError:
|
||||
raise AttributeError(
|
||||
'No member for %s (or [\'%s\'])' % (name, _to_jsonc_name(name)))
|
||||
|
||||
# For container methods pass-through to the underlying dict.
|
||||
def __getitem__(self, key):
|
||||
return self._dict[key]
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self._dict[key] = value
|
||||
|
||||
def __delitem__(self, key):
|
||||
del self._dict[key]
|
||||
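# Illustrative round trip (not part of the original library code); the JSON-C
# string below is a made-up minimal document.
#
#   doc = parse_json('{"data": {"totalItems": 2}, "apiVersion": "2.0"}')
#   doc.api_version         # -> '2.0'
#   doc.data.total_items    # -> 2
#   doc.data['totalItems']  # -> 2
#   jsonc_to_string(doc)    # -> a JSON-C string equivalent to the input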
1186
python/gdata/data.py
Normal file
File diff suppressed because it is too large
269
python/gdata/docs/__init__.py
Normal file
@@ -0,0 +1,269 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains extensions to Atom objects used with Google Documents."""
|
||||
|
||||
__author__ = ('api.jfisher (Jeff Fisher), '
|
||||
'api.eric@google.com (Eric Bidelman)')
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
DOCUMENTS_NAMESPACE = 'http://schemas.google.com/docs/2007'
|
||||
|
||||
|
||||
class Scope(atom.AtomBase):
|
||||
"""The DocList ACL scope element"""
|
||||
|
||||
_tag = 'scope'
|
||||
_namespace = gdata.GACL_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['value'] = 'value'
|
||||
_attributes['type'] = 'type'
|
||||
|
||||
def __init__(self, value=None, type=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.value = value
|
||||
self.type = type
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class Role(atom.AtomBase):
|
||||
"""The DocList ACL role element"""
|
||||
|
||||
_tag = 'role'
|
||||
_namespace = gdata.GACL_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, value=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.value = value
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class FeedLink(atom.AtomBase):
|
||||
"""The DocList gd:feedLink element"""
|
||||
|
||||
_tag = 'feedLink'
|
||||
_namespace = gdata.GDATA_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['rel'] = 'rel'
|
||||
_attributes['href'] = 'href'
|
||||
|
||||
def __init__(self, href=None, rel=None, text=None, extension_elements=None,
|
||||
extension_attributes=None):
|
||||
self.href = href
|
||||
self.rel = rel
|
||||
atom.AtomBase.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
|
||||
class ResourceId(atom.AtomBase):
|
||||
"""The DocList gd:resourceId element"""
|
||||
|
||||
_tag = 'resourceId'
|
||||
_namespace = gdata.GDATA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
def __init__(self, value=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.value = value
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class LastModifiedBy(atom.Person):
|
||||
"""The DocList gd:lastModifiedBy element"""
|
||||
|
||||
_tag = 'lastModifiedBy'
|
||||
_namespace = gdata.GDATA_NAMESPACE
|
||||
|
||||
|
||||
class LastViewed(atom.Person):
|
||||
"""The DocList gd:lastViewed element"""
|
||||
|
||||
_tag = 'lastViewed'
|
||||
_namespace = gdata.GDATA_NAMESPACE
|
||||
|
||||
|
||||
class WritersCanInvite(atom.AtomBase):
|
||||
"""The DocList docs:writersCanInvite element"""
|
||||
|
||||
_tag = 'writersCanInvite'
|
||||
_namespace = DOCUMENTS_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['value'] = 'value'
|
||||
|
||||
|
||||
class DocumentListEntry(gdata.GDataEntry):
|
||||
"""The Google Documents version of an Atom Entry"""
|
||||
|
||||
_tag = gdata.GDataEntry._tag
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feedLink', FeedLink)
|
||||
_children['{%s}resourceId' % gdata.GDATA_NAMESPACE] = ('resourceId',
|
||||
ResourceId)
|
||||
_children['{%s}lastModifiedBy' % gdata.GDATA_NAMESPACE] = ('lastModifiedBy',
|
||||
LastModifiedBy)
|
||||
_children['{%s}lastViewed' % gdata.GDATA_NAMESPACE] = ('lastViewed',
|
||||
LastViewed)
|
||||
_children['{%s}writersCanInvite' % DOCUMENTS_NAMESPACE] = (
|
||||
'writersCanInvite', WritersCanInvite)
|
||||
|
||||
def __init__(self, resourceId=None, feedLink=None, lastViewed=None,
|
||||
lastModifiedBy=None, writersCanInvite=None, author=None,
|
||||
category=None, content=None, atom_id=None, link=None,
|
||||
published=None, title=None, updated=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
self.feedLink = feedLink
|
||||
self.lastViewed = lastViewed
|
||||
self.lastModifiedBy = lastModifiedBy
|
||||
self.resourceId = resourceId
|
||||
self.writersCanInvite = writersCanInvite
|
||||
gdata.GDataEntry.__init__(
|
||||
self, author=author, category=category, content=content,
|
||||
atom_id=atom_id, link=link, published=published, title=title,
|
||||
updated=updated, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
def GetAclLink(self):
|
||||
"""Extracts the DocListEntry's <gd:feedLink>.
|
||||
|
||||
Returns:
|
||||
A FeedLink object.
|
||||
"""
|
||||
return self.feedLink
|
||||
|
||||
def GetDocumentType(self):
|
||||
"""Extracts the type of document from the DocListEntry.
|
||||
|
||||
This method returns the type of document the DocListEntry
|
||||
represents. Possible values are document, presentation,
|
||||
spreadsheet, folder, or pdf.
|
||||
|
||||
Returns:
|
||||
A string representing the type of document.
|
||||
"""
|
||||
if self.category:
|
||||
for category in self.category:
|
||||
if category.scheme == gdata.GDATA_NAMESPACE + '#kind':
|
||||
return category.label
|
||||
else:
|
||||
return None
|
||||
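# Illustrative usage sketch (not part of the original library code): listing
# the kind of every item in a DocumentListFeed; `feed` is assumed to have been
# fetched from the DocList API already.
#
#   for entry in feed.entry:
#     print entry.title.text, '->', entry.GetDocumentType()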
|
||||
|
||||
def DocumentListEntryFromString(xml_string):
|
||||
"""Converts an XML string into a DocumentListEntry object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Document List feed entry.
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(DocumentListEntry, xml_string)
|
||||
|
||||
|
||||
class DocumentListAclEntry(gdata.GDataEntry):
|
||||
"""A DocList ACL Entry flavor of an Atom Entry"""
|
||||
|
||||
_tag = gdata.GDataEntry._tag
|
||||
_namespace = gdata.GDataEntry._namespace
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}scope' % gdata.GACL_NAMESPACE] = ('scope', Scope)
|
||||
_children['{%s}role' % gdata.GACL_NAMESPACE] = ('role', Role)
|
||||
|
||||
def __init__(self, category=None, atom_id=None, link=None,
|
||||
title=None, updated=None, scope=None, role=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
gdata.GDataEntry.__init__(self, author=None, category=category,
|
||||
content=None, atom_id=atom_id, link=link,
|
||||
published=None, title=title,
|
||||
updated=updated, text=None)
|
||||
self.scope = scope
|
||||
self.role = role
|
||||
|
||||
|
||||
def DocumentListAclEntryFromString(xml_string):
|
||||
"""Converts an XML string into a DocumentListAclEntry object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Document List ACL feed entry.
|
||||
|
||||
Returns:
|
||||
A DocumentListAclEntry object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(DocumentListAclEntry, xml_string)
|
||||
|
||||
|
||||
class DocumentListFeed(gdata.GDataFeed):
|
||||
"""A feed containing a list of Google Documents Items"""
|
||||
|
||||
_tag = gdata.GDataFeed._tag
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
|
||||
[DocumentListEntry])
|
||||
|
||||
|
||||
def DocumentListFeedFromString(xml_string):
|
||||
"""Converts an XML string into a DocumentListFeed object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a DocumentList feed.
|
||||
|
||||
Returns:
|
||||
A DocumentListFeed object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(DocumentListFeed, xml_string)
|
||||
|
||||
|
||||
class DocumentListAclFeed(gdata.GDataFeed):
|
||||
"""A DocList ACL feed flavor of a Atom feed"""
|
||||
|
||||
_tag = gdata.GDataFeed._tag
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
|
||||
[DocumentListAclEntry])
|
||||
|
||||
|
||||
def DocumentListAclFeedFromString(xml_string):
|
||||
"""Converts an XML string into a DocumentListAclFeed object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a DocumentList feed.
|
||||
|
||||
Returns:
|
||||
A DocumentListFeed object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(DocumentListAclFeed, xml_string)
|
||||
611
python/gdata/docs/client.py
Normal file
@@ -0,0 +1,611 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""DocsClient extends gdata.client.GDClient to streamline DocList API calls."""
|
||||
|
||||
|
||||
__author__ = 'e.bidelman (Eric Bidelman)'
|
||||
|
||||
import mimetypes
|
||||
import urllib
|
||||
import atom.data
|
||||
import atom.http_core
|
||||
import gdata.client
|
||||
import gdata.docs.data
|
||||
import gdata.gauth
|
||||
|
||||
|
||||
# Feed URI templates
|
||||
DOCLIST_FEED_URI = '/feeds/default/private/full/'
|
||||
FOLDERS_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/contents'
|
||||
ACL_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/acl'
|
||||
REVISIONS_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/revisions'
|
||||
|
||||
|
||||
class DocsClient(gdata.client.GDClient):
|
||||
"""Client extension for the Google Documents List API."""
|
||||
|
||||
host = 'docs.google.com' # default server for the API
|
||||
api_version = '3.0' # default major version for the service.
|
||||
auth_service = 'writely'
|
||||
auth_scopes = gdata.gauth.AUTH_SCOPES['writely']
|
||||
ssl = True
|
||||
|
||||
def __init__(self, auth_token=None, **kwargs):
|
||||
"""Constructs a new client for the DocList API.
|
||||
|
||||
Args:
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: The other parameters to pass to gdata.client.GDClient constructor.
|
||||
"""
|
||||
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
|
||||
|
||||
def get_file_content(self, uri, auth_token=None, **kwargs):
|
||||
"""Fetches the file content from the specified uri.
|
||||
|
||||
This method is useful for downloading/exporting a file within environments
|
||||
like Google App Engine, where the user does not have the ability to write
|
||||
the file to a local disk.
|
||||
|
||||
Args:
|
||||
uri: str The full URL to fetch the file contents from.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.request().
|
||||
|
||||
Returns:
|
||||
The binary file content.
|
||||
|
||||
Raises:
|
||||
gdata.client.RequestError: on error response from server.
|
||||
"""
|
||||
server_response = self.request('GET', uri, auth_token=auth_token, **kwargs)
|
||||
if server_response.status != 200:
|
||||
raise gdata.client.RequestError, {'status': server_response.status,
|
||||
'reason': server_response.reason,
|
||||
'body': server_response.read()}
|
||||
return server_response.read()
|
||||
|
||||
GetFileContent = get_file_content
|
||||
|
||||
def _download_file(self, uri, file_path, auth_token=None, **kwargs):
|
||||
"""Downloads a file to disk from the specified URI.
|
||||
|
||||
Note: to download a file in memory, use the GetFileContent() method.
|
||||
|
||||
Args:
|
||||
uri: str The full URL to download the file from.
|
||||
file_path: str The full path to save the file to.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.get_file_content().
|
||||
|
||||
Raises:
|
||||
gdata.client.RequestError: on error response from server.
|
||||
"""
|
||||
f = open(file_path, 'wb')
|
||||
try:
|
||||
f.write(self.get_file_content(uri, auth_token=auth_token, **kwargs))
|
||||
except gdata.client.RequestError, e:
|
||||
f.close()
|
||||
raise e
|
||||
f.flush()
|
||||
f.close()
|
||||
|
||||
_DownloadFile = _download_file
|
||||
|
||||
def get_doclist(self, uri=None, limit=None, auth_token=None, **kwargs):
|
||||
"""Retrieves the main doclist feed containing the user's items.
|
||||
|
||||
Args:
|
||||
uri: str (optional) A URI to query the doclist feed.
|
||||
limit: int (optional) A maximum cap for the number of results to
|
||||
return in the feed. By default, the API returns a maximum of 100
|
||||
per page. Thus, if you set limit=5000, you will get <= 5000
|
||||
documents (guaranteed no more than 5000), and will need to follow the
|
||||
feed's next links (feed.GetNextLink()) to the rest. See
|
||||
get_everything(). Similarly, if you set limit=50, only <= 50
|
||||
documents are returned. Note: if the max-results parameter is set in
|
||||
the uri parameter, it is chosen over a value set for limit.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.get_feed().
|
||||
|
||||
Returns:
|
||||
gdata.docs.data.DocList feed.
|
||||
"""
|
||||
if uri is None:
|
||||
uri = DOCLIST_FEED_URI
|
||||
|
||||
if isinstance(uri, (str, unicode)):
|
||||
uri = atom.http_core.Uri.parse_uri(uri)
|
||||
|
||||
# Add max-results param if it wasn't included in the uri.
|
||||
if limit is not None and not 'max-results' in uri.query:
|
||||
uri.query['max-results'] = limit
|
||||
|
||||
return self.get_feed(uri, desired_class=gdata.docs.data.DocList,
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
GetDocList = get_doclist
|
||||
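# Illustrative usage sketch (not part of the original library code): `client`
# is an authenticated DocsClient; the title query below is hypothetical.
#
#   feed = client.GetDocList(limit=10)
#   for entry in feed.entry:
#     print entry.title.text
#
#   # A query can also be expressed directly in the uri argument:
#   feed = client.GetDocList(uri='/feeds/default/private/full?title=budget')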
|
||||
def get_doc(self, resource_id, etag=None, auth_token=None, **kwargs):
|
||||
"""Retrieves a particular document given by its resource id.
|
||||
|
||||
Args:
|
||||
resource_id: str The document/item's resource id. Example spreadsheet:
|
||||
'spreadsheet%3A0A1234567890'.
|
||||
etag: str (optional) The document/item's etag value to be used in a
|
||||
conditional GET. See http://code.google.com/apis/documents/docs/3.0/
|
||||
developers_guide_protocol.html#RetrievingCached.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.get_entry().
|
||||
|
||||
Returns:
|
||||
A gdata.docs.data.DocsEntry object representing the retrieved entry.
|
||||
|
||||
Raises:
|
||||
ValueError if the resource_id is not a valid format.
|
||||
"""
|
||||
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
|
||||
if match is None:
|
||||
raise ValueError, 'Invalid resource id: %s' % resource_id
|
||||
return self.get_entry(
|
||||
DOCLIST_FEED_URI + resource_id, etag=etag,
|
||||
desired_class=gdata.docs.data.DocsEntry,
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
GetDoc = get_doc
|
||||
|
||||
def get_everything(self, uri=None, auth_token=None, **kwargs):
|
||||
"""Retrieves the user's entire doc list.
|
||||
|
||||
The method makes multiple HTTP requests (by following the feed's next links)
|
||||
in order to fetch the user's entire document list.
|
||||
|
||||
Args:
|
||||
uri: str (optional) A URI to query the doclist feed with.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.GetDocList().
|
||||
|
||||
Returns:
|
||||
A list of gdata.docs.data.DocsEntry objects representing the retrieved
|
||||
entries.
|
||||
"""
|
||||
if uri is None:
|
||||
uri = DOCLIST_FEED_URI
|
||||
|
||||
feed = self.GetDocList(uri=uri, auth_token=auth_token, **kwargs)
|
||||
entries = feed.entry
|
||||
|
||||
while feed.GetNextLink() is not None:
|
||||
feed = self.GetDocList(
|
||||
feed.GetNextLink().href, auth_token=auth_token, **kwargs)
|
||||
entries.extend(feed.entry)
|
||||
|
||||
return entries
|
||||
|
||||
GetEverything = get_everything
|
||||
|
||||
def get_acl_permissions(self, resource_id, auth_token=None, **kwargs):
|
||||
"""Retrieves a the ACL sharing permissions for a document.
|
||||
|
||||
Args:
|
||||
resource_id: str The document/item's resource id. Example for pdf:
|
||||
'pdf%3A0A1234567890'.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.get_feed().
|
||||
|
||||
Returns:
|
||||
A gdata.docs.data.AclFeed object representing the document's ACL entries.
|
||||
|
||||
Raises:
|
||||
ValueError if the resource_id is not a valid format.
|
||||
"""
|
||||
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
|
||||
if match is None:
|
||||
raise ValueError, 'Invalid resource id: %s' % resource_id
|
||||
|
||||
return self.get_feed(
|
||||
ACL_FEED_TEMPLATE % resource_id, desired_class=gdata.docs.data.AclFeed,
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
GetAclPermissions = get_acl_permissions
|
||||
|
||||
def get_revisions(self, resource_id, auth_token=None, **kwargs):
|
||||
"""Retrieves the revision history for a document.
|
||||
|
||||
Args:
|
||||
resource_id: str The document/item's resource id. Example for pdf:
|
||||
'pdf%3A0A1234567890'.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.get_feed().
|
||||
|
||||
Returns:
|
||||
A gdata.docs.data.RevisionFeed representing the document's revisions.
|
||||
|
||||
Raises:
|
||||
ValueError if the resource_id is not a valid format.
|
||||
"""
|
||||
match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id)
|
||||
if match is None:
|
||||
raise ValueError, 'Invalid resource id: %s' % resource_id
|
||||
|
||||
return self.get_feed(
|
||||
REVISIONS_FEED_TEMPLATE % resource_id,
|
||||
desired_class=gdata.docs.data.RevisionFeed, auth_token=auth_token,
|
||||
**kwargs)
|
||||
|
||||
GetRevisions = get_revisions
|
||||
|
||||
def create(self, doc_type, title, folder_or_id=None, writers_can_invite=None,
|
||||
auth_token=None, **kwargs):
|
||||
"""Creates a new item in the user's doclist.
|
||||
|
||||
Args:
|
||||
doc_type: str The type of object to create. For example: 'document',
|
||||
'spreadsheet', 'folder', 'presentation'.
|
||||
title: str A title for the document.
|
||||
folder_or_id: gdata.docs.data.DocsEntry or str (optional) Folder entry or
|
||||
the resource id of a folder to create the object under. Note: A valid
|
||||
resource id for a folder is of the form: folder%3Afolder_id.
|
||||
writers_can_invite: bool (optional) False prevents collaborators from
|
||||
being able to invite others to edit or view the document.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.post().
|
||||
|
||||
Returns:
|
||||
gdata.docs.data.DocsEntry containing information about the newly created item.
|
||||
"""
|
||||
entry = gdata.docs.data.DocsEntry(title=atom.data.Title(text=title))
|
||||
entry.category.append(gdata.docs.data.make_kind_category(doc_type))
|
||||
|
||||
if isinstance(writers_can_invite, gdata.docs.data.WritersCanInvite):
|
||||
entry.writers_can_invite = writers_can_invite
|
||||
elif isinstance(writers_can_invite, bool):
|
||||
entry.writers_can_invite = gdata.docs.data.WritersCanInvite(
|
||||
value=str(writers_can_invite).lower())
|
||||
|
||||
uri = DOCLIST_FEED_URI
|
||||
|
||||
if folder_or_id is not None:
|
||||
if isinstance(folder_or_id, gdata.docs.data.DocsEntry):
|
||||
# Verify that we're uploading the resource into a folder.
|
||||
if folder_or_id.get_document_type() == gdata.docs.data.FOLDER_LABEL:
|
||||
uri = folder_or_id.content.src
|
||||
else:
|
||||
raise gdata.client.Error, 'Trying to upload item to a non-folder.'
|
||||
else:
|
||||
uri = FOLDERS_FEED_TEMPLATE % folder_or_id
|
||||
|
||||
return self.post(entry, uri, auth_token=auth_token, **kwargs)
|
||||
|
||||
Create = create
|
||||
|
||||
def copy(self, source_entry, title, auth_token=None, **kwargs):
|
||||
"""Copies a native Google document, spreadsheet, or presentation.
|
||||
|
||||
Note: arbitrary file types and PDFs do not support this feature.
|
||||
|
||||
Args:
|
||||
source_entry: gdata.docs.data.DocsEntry An object representing the source
|
||||
document/folder.
|
||||
title: str A title for the new document.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.post().
|
||||
|
||||
Returns:
|
||||
A gdata.docs.data.DocsEntry of the duplicated document.
|
||||
"""
|
||||
entry = gdata.docs.data.DocsEntry(
|
||||
title=atom.data.Title(text=title),
|
||||
id=atom.data.Id(text=source_entry.GetSelfLink().href))
|
||||
return self.post(entry, DOCLIST_FEED_URI, auth_token=auth_token, **kwargs)
|
||||
|
||||
Copy = copy
|
||||
|
||||
def move(self, source_entry, folder_entry=None,
|
||||
keep_in_folders=False, auth_token=None, **kwargs):
|
||||
"""Moves an item into a different folder (or to the root document list).
|
||||
|
||||
Args:
|
||||
source_entry: gdata.docs.data.DocsEntry An object representing the source
|
||||
document/folder.
|
||||
folder_entry: gdata.docs.data.DocsEntry (optional) An object representing
|
||||
the destination folder. If None and keep_in_folders is
|
||||
False (the default), the item is removed from all parent folders.
|
||||
keep_in_folders: boolean (optional) If True, the source entry
|
||||
is not removed from any existing parent folders it is in.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.post().
|
||||
|
||||
Returns:
|
||||
A gdata.docs.data.DocsEntry of the moved entry or True if just moving the
|
||||
item out of all folders (e.g. Move(source_entry)).
|
||||
"""
|
||||
entry = gdata.docs.data.DocsEntry(id=source_entry.id)
|
||||
|
||||
# Remove the item from any folders it is already in.
|
||||
if not keep_in_folders:
|
||||
for folder in source_entry.InFolders():
|
||||
self.delete(
|
||||
'%s/contents/%s' % (
|
||||
folder.href,
|
||||
urllib.quote(source_entry.resource_id.text)),
|
||||
force=True)
|
||||
|
||||
# If we're moving the resource into a folder, verify it is a folder entry.
|
||||
if folder_entry is not None:
|
||||
if folder_entry.get_document_type() == gdata.docs.data.FOLDER_LABEL:
|
||||
return self.post(entry, folder_entry.content.src,
|
||||
auth_token=auth_token, **kwargs)
|
||||
else:
|
||||
raise gdata.client.Error, 'Trying to move item into a non-folder.'
|
||||
|
||||
return True
|
||||
|
||||
Move = move
|
||||
|
||||
def upload(self, media, title, folder_or_uri=None, content_type=None,
|
||||
auth_token=None, **kwargs):
|
||||
"""Uploads a file to Google Docs.
|
||||
|
||||
Args:
|
||||
media: A gdata.data.MediaSource object containing the file to be
|
||||
uploaded or a string of the filepath.
|
||||
title: str The title of the document on the server after being
|
||||
uploaded.
|
||||
folder_or_uri: gdata.docs.data.DocsEntry or str (optional) An object with
|
||||
a link to the folder or the uri to upload the file to.
|
||||
Note: A valid uri for a folder is of the form:
|
||||
/feeds/default/private/full/folder%3Afolder_id/contents
|
||||
content_type: str (optional) The file's mimetype. If not provided, the
|
||||
one in the media source object is used or the mimetype is inferred
|
||||
from the filename (if media is a string). When media is a filename,
|
||||
it is always recommended to pass in a content type.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.post().
|
||||
|
||||
Returns:
|
||||
A gdata.docs.data.DocsEntry containing information about the uploaded doc.
|
||||
"""
|
||||
uri = None
|
||||
if folder_or_uri is not None:
|
||||
if isinstance(folder_or_uri, gdata.docs.data.DocsEntry):
|
||||
# Verify that we're uploading the resource into a folder.
|
||||
if folder_or_uri.get_document_type() == gdata.docs.data.FOLDER_LABEL:
|
||||
uri = folder_or_uri.content.src
|
||||
else:
|
||||
raise gdata.client.Error, 'Trying to upload item to a non-folder.'
|
||||
else:
|
||||
uri = folder_or_uri
|
||||
else:
|
||||
uri = DOCLIST_FEED_URI
|
||||
|
||||
# Create media source if media is a filepath.
|
||||
if isinstance(media, (str, unicode)):
|
||||
mimetype = mimetypes.guess_type(media)[0]
|
||||
if mimetype is None and content_type is None:
|
||||
raise ValueError, ("Unknown mimetype. Please pass in the file's "
|
||||
"content_type")
|
||||
else:
|
||||
media = gdata.data.MediaSource(file_path=media,
|
||||
content_type=content_type)
|
||||
|
||||
entry = gdata.docs.data.DocsEntry(title=atom.data.Title(text=title))
|
||||
|
||||
return self.post(entry, uri, media_source=media,
|
||||
desired_class=gdata.docs.data.DocsEntry,
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
Upload = upload
|
||||
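# Illustrative usage sketch (not part of the original library code): uploading
# a local CSV file as a new spreadsheet; the path and title are hypothetical.
#
#   entry = client.Upload('/tmp/report.csv', 'Monthly report',
#                         content_type='text/csv')
#   print 'Uploaded:', entry.title.text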
|
||||
def download(self, entry_or_id_or_url, file_path, extra_params=None,
|
||||
auth_token=None, **kwargs):
|
||||
"""Downloads a file from the Document List to local disk.
|
||||
|
||||
Note: to download a file in memory, use the GetFileContent() method.
|
||||
|
||||
Args:
|
||||
entry_or_id_or_url: gdata.docs.data.DocsEntry or string representing a
|
||||
resource id or URL to download the document from (such as the content
|
||||
src link).
|
||||
file_path: str The full path to save the file to.
|
||||
extra_params: dict (optional) A map of any further parameters to control
|
||||
how the document is downloaded/exported. For example, exporting a
|
||||
spreadsheet as a .csv: extra_params={'gid': 0, 'exportFormat': 'csv'}
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self._download_file().
|
||||
|
||||
Raises:
|
||||
gdata.client.RequestError if the download URL is malformed or the server's
|
||||
response was not successful.
|
||||
ValueError if entry_or_id_or_url was a resource id for a filetype
|
||||
for which the download link cannot be manually constructed (e.g. pdf).
|
||||
"""
|
||||
if isinstance(entry_or_id_or_url, gdata.docs.data.DocsEntry):
|
||||
url = entry_or_id_or_url.content.src
|
||||
else:
|
||||
if gdata.docs.data.RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
|
||||
url = gdata.docs.data.make_content_link_from_resource_id(
|
||||
entry_or_id_or_url)
|
||||
else:
|
||||
url = entry_or_id_or_url
|
||||
|
||||
if extra_params is not None:
|
||||
if 'exportFormat' in extra_params and url.find('/Export?') == -1:
|
||||
raise gdata.client.Error, ('This entry type cannot be exported '
|
||||
'as a different format.')
|
||||
|
||||
if 'gid' in extra_params and url.find('spreadsheets') == -1:
|
||||
raise gdata.client.Error, 'gid param is not valid for this doc type.'
|
||||
|
||||
url += '&' + urllib.urlencode(extra_params)
|
||||
|
||||
self._download_file(url, file_path, auth_token=auth_token, **kwargs)
|
||||
|
||||
Download = download
|
||||
|
||||
def export(self, entry_or_id_or_url, file_path, gid=None, auth_token=None,
|
||||
**kwargs):
|
||||
"""Exports a document from the Document List in a different format.
|
||||
|
||||
Args:
|
||||
entry_or_id_or_url: gdata.docs.data.DocsEntry or string representing a
|
||||
resource id or URL to download the document from (such as the content
|
||||
src link).
|
||||
file_path: str The full path to save the file to. The export
|
||||
format is inferred from the file extension.
|
||||
gid: str (optional) grid id for downloading a single grid of a
|
||||
spreadsheet. The param should only be used for .csv and .tsv
|
||||
spreadsheet exports.
|
||||
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
|
||||
OAuthToken which authorizes this client to edit the user's data.
|
||||
kwargs: Other parameters to pass to self.download().
|
||||
|
||||
Raises:
|
||||
gdata.client.RequestError if the download URL is malformed or the server's
|
||||
response was not successful.
|
||||
"""
|
||||
extra_params = {}
|
||||
|
||||
match = gdata.docs.data.FILE_EXT_PATTERN.match(file_path)
|
||||
if match:
|
||||
extra_params['exportFormat'] = match.group(1)
|
||||
|
||||
if gid is not None:
|
||||
extra_params['gid'] = gid
|
||||
|
||||
self.download(entry_or_id_or_url, file_path, extra_params,
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
Export = export
|
||||
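# Illustrative usage sketch (not part of the original library code): exporting
# an existing document entry as PDF, and one sheet of a spreadsheet entry as
# CSV; the entry variables and file names are hypothetical.
#
#   client.Export(doc_entry, '/tmp/notes.pdf')
#   client.Export(spreadsheet_entry, '/tmp/data.csv', gid='0')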
|
||||
|
||||
class DocsQuery(gdata.client.Query):
|
||||
|
||||
def __init__(self, title=None, title_exact=None, opened_min=None,
|
||||
opened_max=None, edited_min=None, edited_max=None, owner=None,
|
||||
writer=None, reader=None, show_folders=None,
|
||||
show_deleted=None, ocr=None, target_language=None,
|
||||
source_language=None, convert=None, **kwargs):
|
||||
"""Constructs a query URL for the Google Documents List API.
|
||||
|
||||
Args:
|
||||
title: str (optional) Specifies the search terms for the title of a
|
||||
document. This parameter used without title_exact will only
|
||||
submit partial queries, not exact queries.
|
||||
title_exact: str (optional) Meaningless without title. Possible values
|
||||
are 'true' and 'false'. Note: Matches are case-insensitive.
|
||||
opened_min: str (optional) Lower bound on the last time a document was
|
||||
opened by the current user. Use the RFC 3339 timestamp
|
||||
format. For example: opened_min='2005-08-09T09:57:00-08:00'.
|
||||
opened_max: str (optional) Upper bound on the last time a document was
|
||||
opened by the current user. (See also opened_min.)
|
||||
edited_min: str (optional) Lower bound on the last time a document was
|
||||
edited by the current user. This value corresponds to the
|
||||
edited.text value in the doc's entry object, which
|
||||
represents changes to the document's content or metadata.
|
||||
Use the RFC 3339 timestamp format. For example:
|
||||
edited_min='2005-08-09T09:57:00-08:00'
|
||||
edited_max: str (optional) Upper bound on the last time a document was
|
||||
edited by the user. (See also edited_min.)
|
||||
owner: str (optional) Searches for documents with a specific owner. Use
|
||||
the email address of the owner. For example:
|
||||
owner='user@gmail.com'
|
||||
writer: str (optional) Searches for documents which can be written to
|
||||
by specific users. Use a single email address or a comma
|
||||
separated list of email addresses. For example:
|
||||
writer='user1@gmail.com,user@example.com'
|
||||
reader: str (optional) Searches for documents which can be read by
|
||||
specific users. (See also writer.)
|
||||
show_folders: str (optional) Specifies whether the query should return
|
||||
folders as well as documents. Possible values are 'true'
|
||||
and 'false'. Default is false.
|
||||
show_deleted: str (optional) Specifies whether the query should return
|
||||
documents which are in the trash as well as other
|
||||
documents. Possible values are 'true' and 'false'.
|
||||
Default is false.
|
||||
ocr: str (optional) Specifies whether to attempt OCR on a .jpg, .png, or
|
||||
.gif upload. Possible values are 'true' and 'false'. Default is
|
||||
false. See OCR in the Protocol Guide:
|
||||
http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#OCR
|
||||
target_language: str (optional) Specifies the language to translate a
|
||||
document into. See Document Translation in the Protocol
|
||||
Guide for a table of possible values:
|
||||
http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#DocumentTranslation
|
||||
source_language: str (optional) Specifies the source language of the
|
||||
original document. Optional when using the translation
|
||||
service. If not provided, Google will attempt to
|
||||
auto-detect the source language. See Document
|
||||
Translation in the Protocol Guide for a table of
|
||||
possible values (link in target_language).
|
||||
convert: str (optional) Used when uploading arbitrary file types to
|
||||
specify whether document-type uploads should be converted to a native
|
||||
Google Docs format. Possible values are 'true' and 'false'.
|
||||
The default is 'true'.
|
||||
"""
|
||||
gdata.client.Query.__init__(self, **kwargs)
|
||||
self.convert = convert
|
||||
self.title = title
|
||||
self.title_exact = title_exact
|
||||
self.opened_min = opened_min
|
||||
self.opened_max = opened_max
|
||||
self.edited_min = edited_min
|
||||
self.edited_max = edited_max
|
||||
self.owner = owner
|
||||
self.writer = writer
|
||||
self.reader = reader
|
||||
self.show_folders = show_folders
|
||||
self.show_deleted = show_deleted
|
||||
self.ocr = ocr
|
||||
self.target_language = target_language
|
||||
self.source_language = source_language
|
||||
|
||||
def modify_request(self, http_request):
|
||||
gdata.client._add_query_param('convert', self.convert, http_request)
|
||||
gdata.client._add_query_param('title', self.title, http_request)
|
||||
gdata.client._add_query_param('title-exact', self.title_exact,
|
||||
http_request)
|
||||
gdata.client._add_query_param('opened-min', self.opened_min, http_request)
|
||||
gdata.client._add_query_param('opened-max', self.opened_max, http_request)
|
||||
gdata.client._add_query_param('edited-min', self.edited_min, http_request)
|
||||
gdata.client._add_query_param('edited-max', self.edited_max, http_request)
|
||||
gdata.client._add_query_param('owner', self.owner, http_request)
|
||||
gdata.client._add_query_param('writer', self.writer, http_request)
|
||||
gdata.client._add_query_param('reader', self.reader, http_request)
|
||||
gdata.client._add_query_param('showfolders', self.show_folders,
|
||||
http_request)
|
||||
gdata.client._add_query_param('showdeleted', self.show_deleted,
|
||||
http_request)
|
||||
gdata.client._add_query_param('ocr', self.ocr, http_request)
|
||||
gdata.client._add_query_param('targetLanguage', self.target_language,
|
||||
http_request)
|
||||
gdata.client._add_query_param('sourceLanguage', self.source_language,
|
||||
http_request)
|
||||
gdata.client.Query.modify_request(self, http_request)
|
||||
|
||||
ModifyRequest = modify_request
|
||||
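# Editor's note (illustrative sketch, not in the original source): a typical
# construction of this query class. The class name is not visible in this hunk;
# gdata.docs.client.DocsQuery is assumed here and should be treated as a
# placeholder.
#
#   query = gdata.docs.client.DocsQuery(title='budget', title_exact='true',
#                                       show_folders='true')
#   # modify_request() above then appends
#   # title=budget&title-exact=true&showfolders=true to the request URI.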
280
python/gdata/docs/data.py
Normal file
280
python/gdata/docs/data.py
Normal file
@@ -0,0 +1,280 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Data model classes for parsing and generating XML for the DocList Data API"""
|
||||
|
||||
__author__ = 'e.bidelman (Eric Bidelman)'
|
||||
|
||||
|
||||
import re
|
||||
import atom.core
|
||||
import atom.data
|
||||
import gdata.acl.data
|
||||
import gdata.data
|
||||
|
||||
DOCUMENTS_NS = 'http://schemas.google.com/docs/2007'
|
||||
DOCUMENTS_TEMPLATE = '{http://schemas.google.com/docs/2007}%s'
|
||||
ACL_FEEDLINK_REL = 'http://schemas.google.com/acl/2007#accessControlList'
|
||||
REVISION_FEEDLINK_REL = DOCUMENTS_NS + '/revisions'
|
||||
|
||||
# XML Namespaces used in Google Documents entities.
|
||||
DATA_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind'
|
||||
DOCUMENT_LABEL = 'document'
|
||||
SPREADSHEET_LABEL = 'spreadsheet'
|
||||
PRESENTATION_LABEL = 'presentation'
|
||||
FOLDER_LABEL = 'folder'
|
||||
PDF_LABEL = 'pdf'
|
||||
|
||||
LABEL_SCHEME = 'http://schemas.google.com/g/2005/labels'
|
||||
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
|
||||
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
|
||||
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
|
||||
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
|
||||
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
|
||||
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
|
||||
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'
|
||||
|
||||
DOCS_PARENT_LINK_REL = DOCUMENTS_NS + '#parent'
|
||||
DOCS_PUBLISH_LINK_REL = DOCUMENTS_NS + '#publish'
|
||||
|
||||
FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)')
|
||||
RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$')
|
||||
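# Editor's sketch (not part of the original module): what RESOURCE_ID_PATTERN
# captures for a typical resource id string.
def _example_split_resource_id():
  match = RESOURCE_ID_PATTERN.match('document:0A1234abcd')
  # group(1) is the kind label, group(3) is the bare document id.
  return match.group(1), match.group(3)  # ('document', '0A1234abcd')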
|
||||
# File extension/MIME type pairs for common formats.
|
||||
MIMETYPES = {
|
||||
'CSV': 'text/csv',
|
||||
'TSV': 'text/tab-separated-values',
|
||||
'TAB': 'text/tab-separated-values',
|
||||
'DOC': 'application/msword',
|
||||
'DOCX': ('application/vnd.openxmlformats-officedocument.'
|
||||
'wordprocessingml.document'),
|
||||
'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
|
||||
'ODT': 'application/vnd.oasis.opendocument.text',
|
||||
'RTF': 'application/rtf',
|
||||
'SXW': 'application/vnd.sun.xml.writer',
|
||||
'TXT': 'text/plain',
|
||||
'XLS': 'application/vnd.ms-excel',
|
||||
'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
|
||||
'PDF': 'application/pdf',
|
||||
'PNG': 'image/png',
|
||||
'PPT': 'application/vnd.ms-powerpoint',
|
||||
'PPS': 'application/vnd.ms-powerpoint',
|
||||
'HTM': 'text/html',
|
||||
'HTML': 'text/html',
|
||||
'ZIP': 'application/zip',
|
||||
'SWF': 'application/x-shockwave-flash'
|
||||
}
|
||||
|
||||
|
||||
def make_kind_category(label):
|
||||
"""Builds the appropriate atom.data.Category for the label passed in.
|
||||
|
||||
Args:
|
||||
label: str The value for the category entry.
|
||||
|
||||
Returns:
|
||||
An atom.data.Category or None if label is None.
|
||||
"""
|
||||
if label is None:
|
||||
return None
|
||||
|
||||
return atom.data.Category(
|
||||
scheme=DATA_KIND_SCHEME, term='%s#%s' % (DOCUMENTS_NS, label), label=label)
|
||||
|
||||
MakeKindCategory = make_kind_category
|
||||
|
||||
def make_content_link_from_resource_id(resource_id):
|
||||
"""Constructs export URL for a given resource.
|
||||
|
||||
Args:
|
||||
resource_id: str The document/item's resource id. Example presentation:
|
||||
'presentation%3A0A1234567890'.
|
||||
|
||||
Raises:
|
||||
ValueError: If the resource_id is not in a valid format.
|
||||
"""
|
||||
match = RESOURCE_ID_PATTERN.match(resource_id)
|
||||
|
||||
if match:
|
||||
label = match.group(1)
|
||||
doc_id = match.group(3)
|
||||
if label == DOCUMENT_LABEL:
|
||||
return '/feeds/download/documents/Export?docId=%s' % doc_id
|
||||
if label == PRESENTATION_LABEL:
|
||||
return '/feeds/download/presentations/Export?docId=%s' % doc_id
|
||||
if label == SPREADSHEET_LABEL:
|
||||
return ('https://spreadsheets.google.com/feeds/download/spreadsheets/'
|
||||
'Export?key=%s' % doc_id)
|
||||
raise ValueError, ('Invalid resource id: %s, or manually creating the '
|
||||
'download url for this type of doc is not possible'
|
||||
% resource_id)
|
||||
|
||||
MakeContentLinkFromResourceId = make_content_link_from_resource_id
|
||||
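# Editor's sketch (not part of the original module): the helper above maps a
# resource id onto a download/export URL.
def _example_export_url():
  url = make_content_link_from_resource_id('presentation:0A1234567890')
  # url == '/feeds/download/presentations/Export?docId=0A1234567890'
  return url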
|
||||
|
||||
class ResourceId(atom.core.XmlElement):
|
||||
"""The DocList gd:resourceId element."""
|
||||
_qname = gdata.data.GDATA_TEMPLATE % 'resourceId'
|
||||
|
||||
|
||||
class LastModifiedBy(atom.data.Person):
|
||||
"""The DocList gd:lastModifiedBy element."""
|
||||
_qname = gdata.data.GDATA_TEMPLATE % 'lastModifiedBy'
|
||||
|
||||
|
||||
class LastViewed(atom.data.Person):
|
||||
"""The DocList gd:lastViewed element."""
|
||||
_qname = gdata.data.GDATA_TEMPLATE % 'lastViewed'
|
||||
|
||||
|
||||
class WritersCanInvite(atom.core.XmlElement):
|
||||
"""The DocList docs:writersCanInvite element."""
|
||||
_qname = DOCUMENTS_TEMPLATE % 'writersCanInvite'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class QuotaBytesUsed(atom.core.XmlElement):
|
||||
"""The DocList gd:quotaBytesUsed element."""
|
||||
_qname = gdata.data.GDATA_TEMPLATE % 'quotaBytesUsed'
|
||||
|
||||
|
||||
class Publish(atom.core.XmlElement):
|
||||
"""The DocList docs:publish element."""
|
||||
_qname = DOCUMENTS_TEMPLATE % 'publish'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class PublishAuto(atom.core.XmlElement):
|
||||
"""The DocList docs:publishAuto element."""
|
||||
_qname = DOCUMENTS_TEMPLATE % 'publishAuto'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class PublishOutsideDomain(atom.core.XmlElement):
|
||||
"""The DocList docs:publishOutsideDomain element."""
|
||||
_qname = DOCUMENTS_TEMPLATE % 'publishOutsideDomain'
|
||||
value = 'value'
|
||||
|
||||
|
||||
class DocsEntry(gdata.data.GDEntry):
|
||||
"""A DocList version of an Atom Entry."""
|
||||
|
||||
last_viewed = LastViewed
|
||||
last_modified_by = LastModifiedBy
|
||||
resource_id = ResourceId
|
||||
writers_can_invite = WritersCanInvite
|
||||
quota_bytes_used = QuotaBytesUsed
|
||||
feed_link = [gdata.data.FeedLink]
|
||||
|
||||
def get_document_type(self):
|
||||
"""Extracts the type of document this DocsEntry is.
|
||||
|
||||
This method returns the type of document the DocsEntry represents. Possible
|
||||
values are document, presentation, spreadsheet, folder, or pdf.
|
||||
|
||||
Returns:
|
||||
A string representing the type of document.
|
||||
"""
|
||||
if self.category:
|
||||
for category in self.category:
|
||||
if category.scheme == DATA_KIND_SCHEME:
|
||||
return category.label
|
||||
else:
|
||||
return None
|
||||
|
||||
GetDocumentType = get_document_type
|
||||
|
||||
def get_acl_feed_link(self):
|
||||
"""Extracts the DocsEntry's ACL feed <gd:feedLink>.
|
||||
|
||||
Returns:
|
||||
A gdata.data.FeedLink object.
|
||||
"""
|
||||
for feed_link in self.feed_link:
|
||||
if feed_link.rel == ACL_FEEDLINK_REL:
|
||||
return feed_link
|
||||
return None
|
||||
|
||||
GetAclFeedLink = get_acl_feed_link
|
||||
|
||||
def get_revisions_feed_link(self):
|
||||
"""Extracts the DocsEntry's revisions feed <gd:feedLink>.
|
||||
|
||||
Returns:
|
||||
A gdata.data.FeedLink object.
|
||||
"""
|
||||
for feed_link in self.feed_link:
|
||||
if feed_link.rel == REVISION_FEEDLINK_REL:
|
||||
return feed_link
|
||||
return None
|
||||
|
||||
GetRevisionsFeedLink = get_revisions_feed_link
|
||||
|
||||
def in_folders(self):
|
||||
"""Returns the parents link(s) (folders) of this entry."""
|
||||
links = []
|
||||
for link in self.link:
|
||||
if link.rel == DOCS_PARENT_LINK_REL and link.href:
|
||||
links.append(link)
|
||||
return links
|
||||
|
||||
InFolders = in_folders
|
||||
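# Editor's sketch (not part of the original module): reading the common
# attributes of a DocsEntry through the accessors defined above.
def _example_inspect_entry(entry):
  doc_type = entry.get_document_type()   # e.g. 'document' or 'spreadsheet'
  acl_link = entry.get_acl_feed_link()   # gdata.data.FeedLink or None
  parents = entry.in_folders()           # list of parent (folder) links
  return doc_type, acl_link, parents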
|
||||
|
||||
class Acl(gdata.acl.data.AclEntry):
|
||||
"""A document ACL entry."""
|
||||
|
||||
|
||||
class DocList(gdata.data.GDFeed):
|
||||
"""The main DocList feed containing a list of Google Documents."""
|
||||
entry = [DocsEntry]
|
||||
|
||||
|
||||
class AclFeed(gdata.acl.data.AclFeed):
|
||||
"""A DocList ACL feed."""
|
||||
entry = [Acl]
|
||||
|
||||
|
||||
class Revision(gdata.data.GDEntry):
|
||||
"""A document Revision entry."""
|
||||
publish = Publish
|
||||
publish_auto = PublishAuto
|
||||
publish_outside_domain = PublishOutsideDomain
|
||||
|
||||
def find_publish_link(self):
|
||||
"""Get the link that points to the published document on the web.
|
||||
|
||||
Returns:
|
||||
A str for the URL in the link with a rel ending in #publish.
|
||||
"""
|
||||
return self.find_url(DOCS_PUBLISH_LINK_REL)
|
||||
|
||||
FindPublishLink = find_publish_link
|
||||
|
||||
def get_publish_link(self):
|
||||
"""Get the link that points to the published document on the web.
|
||||
|
||||
Returns:
|
||||
A gdata.data.Link for the link with a rel ending in #publish.
|
||||
"""
|
||||
return self.get_link(DOCS_PUBLISH_LINK_REL)
|
||||
|
||||
GetPublishLink = get_publish_link
|
||||
|
||||
|
||||
class RevisionFeed(gdata.data.GDFeed):
|
||||
"""A DocList Revision feed."""
|
||||
entry = [Revision]
|
||||
611
python/gdata/docs/service.py
Normal file
611
python/gdata/docs/service.py
Normal file
@@ -0,0 +1,611 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""DocsService extends the GDataService to streamline Google Documents
|
||||
operations.
|
||||
|
||||
DocsService: Provides methods to query feeds and manipulate items.
|
||||
Extends GDataService.
|
||||
|
||||
DocumentQuery: Queries a Google Document list feed.
|
||||
|
||||
DocumentAclQuery: Queries a Google Document Acl feed.
|
||||
"""
|
||||
|
||||
|
||||
__author__ = ('api.jfisher (Jeff Fisher), '
|
||||
'e.bidelman (Eric Bidelman)')
|
||||
|
||||
import re
|
||||
import atom
|
||||
import gdata.service
|
||||
import gdata.docs
|
||||
import urllib
|
||||
|
||||
# XML Namespaces used in Google Documents entities.
|
||||
DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind'
|
||||
DOCUMENT_LABEL = 'document'
|
||||
SPREADSHEET_LABEL = 'spreadsheet'
|
||||
PRESENTATION_LABEL = 'presentation'
|
||||
FOLDER_LABEL = 'folder'
|
||||
PDF_LABEL = 'pdf'
|
||||
|
||||
LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels'
|
||||
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
|
||||
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
|
||||
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
|
||||
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
|
||||
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
|
||||
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
|
||||
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'
|
||||
|
||||
FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/'
|
||||
|
||||
# File extensions of documents that are permitted to be uploaded or downloaded.
|
||||
SUPPORTED_FILETYPES = {
|
||||
'CSV': 'text/csv',
|
||||
'TSV': 'text/tab-separated-values',
|
||||
'TAB': 'text/tab-separated-values',
|
||||
'DOC': 'application/msword',
|
||||
'DOCX': ('application/vnd.openxmlformats-officedocument.'
|
||||
'wordprocessingml.document'),
|
||||
'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
|
||||
'ODT': 'application/vnd.oasis.opendocument.text',
|
||||
'RTF': 'application/rtf',
|
||||
'SXW': 'application/vnd.sun.xml.writer',
|
||||
'TXT': 'text/plain',
|
||||
'XLS': 'application/vnd.ms-excel',
|
||||
'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
|
||||
'PDF': 'application/pdf',
|
||||
'PNG': 'image/png',
|
||||
'PPT': 'application/vnd.ms-powerpoint',
|
||||
'PPS': 'application/vnd.ms-powerpoint',
|
||||
'HTM': 'text/html',
|
||||
'HTML': 'text/html',
|
||||
'ZIP': 'application/zip',
|
||||
'SWF': 'application/x-shockwave-flash'
|
||||
}
|
||||
|
||||
|
||||
class DocsService(gdata.service.GDataService):
|
||||
|
||||
"""Client extension for the Google Documents service Document List feed."""
|
||||
|
||||
__FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)')
|
||||
__RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$')
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server='docs.google.com', additional_headers=None, **kwargs):
|
||||
"""Creates a client for the Google Documents service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'docs.google.com'.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service='writely', source=source,
|
||||
server=server, additional_headers=additional_headers, **kwargs)
|
||||
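  # Editor's note (illustrative sketch, not in the original source): typical
  # construction and sign-in, using ClientLogin inherited from
  # gdata.service.GDataService. The credentials below are placeholders.
  #
  #   client = DocsService(source='yourCompany-yourApp-v1')
  #   client.ClientLogin('user@example.com', 'password')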
|
||||
def _MakeKindCategory(self, label):
|
||||
if label is None:
|
||||
return None
|
||||
return atom.Category(scheme=DATA_KIND_SCHEME,
|
||||
term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label)
|
||||
|
||||
def _MakeContentLinkFromId(self, resource_id):
|
||||
match = self.__RESOURCE_ID_PATTERN.match(resource_id)
|
||||
label = match.group(1)
|
||||
doc_id = match.group(3)
|
||||
if label == DOCUMENT_LABEL:
|
||||
return '/feeds/download/documents/Export?docId=%s' % doc_id
|
||||
if label == PRESENTATION_LABEL:
|
||||
return '/feeds/download/presentations/Export?docId=%s' % doc_id
|
||||
if label == SPREADSHEET_LABEL:
|
||||
return ('https://spreadsheets.google.com/feeds/download/spreadsheets/'
|
||||
'Export?key=%s' % doc_id)
|
||||
raise ValueError, 'Invalid resource id: %s' % resource_id
|
||||
|
||||
def _UploadFile(self, media_source, title, category, folder_or_uri=None):
|
||||
"""Uploads a file to the Document List feed.
|
||||
|
||||
Args:
|
||||
media_source: A gdata.MediaSource object containing the file to be
|
||||
uploaded.
|
||||
title: string The title of the document on the server after being
|
||||
uploaded.
|
||||
category: An atom.Category object specifying the appropriate document
|
||||
type.
|
||||
folder_or_uri: DocumentListEntry or string (optional) An object with a
|
||||
link to a folder or a uri to a folder to upload to.
|
||||
Note: A valid uri for a folder is of the form:
|
||||
/feeds/folders/private/full/folder%3Afolder_id
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry containing information about the document created on
|
||||
the Google Documents service.
|
||||
"""
|
||||
if folder_or_uri:
|
||||
try:
|
||||
uri = folder_or_uri.content.src
|
||||
except AttributeError:
|
||||
uri = folder_or_uri
|
||||
else:
|
||||
uri = '/feeds/documents/private/full'
|
||||
|
||||
entry = gdata.docs.DocumentListEntry()
|
||||
entry.title = atom.Title(text=title)
|
||||
if category is not None:
|
||||
entry.category.append(category)
|
||||
entry = self.Post(entry, uri, media_source=media_source,
|
||||
extra_headers={'Slug': media_source.file_name},
|
||||
converter=gdata.docs.DocumentListEntryFromString)
|
||||
return entry
|
||||
|
||||
def _DownloadFile(self, uri, file_path):
|
||||
"""Downloads a file.
|
||||
|
||||
Args:
|
||||
uri: string The full Export URL to download the file from.
|
||||
file_path: string The full path to save the file to.
|
||||
|
||||
Raises:
|
||||
RequestError: on error response from server.
|
||||
"""
|
||||
server_response = self.request('GET', uri)
|
||||
response_body = server_response.read()
|
||||
if server_response.status != 200:
|
||||
raise gdata.service.RequestError, {'status': server_response.status,
|
||||
'reason': server_response.reason,
|
||||
'body': response_body}
|
||||
f = open(file_path, 'wb')
|
||||
f.write(response_body)
|
||||
f.flush()
|
||||
f.close()
|
||||
|
||||
def MoveIntoFolder(self, source_entry, folder_entry):
|
||||
"""Moves a document into a folder in the Document List Feed.
|
||||
|
||||
Args:
|
||||
source_entry: DocumentListEntry An object representing the source
|
||||
document/folder.
|
||||
folder_entry: DocumentListEntry An object with a link to the destination
|
||||
folder.
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry for the document in its new location on
|
||||
the Google Documents service.
|
||||
"""
|
||||
entry = gdata.docs.DocumentListEntry()
|
||||
entry.id = source_entry.id
|
||||
entry = self.Post(entry, folder_entry.content.src,
|
||||
converter=gdata.docs.DocumentListEntryFromString)
|
||||
return entry
|
||||
|
||||
def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString):
|
||||
"""Queries the Document List feed and returns the resulting feed of
|
||||
entries.
|
||||
|
||||
Args:
|
||||
uri: string The full URI to be queried. This can contain query
|
||||
parameters, a hostname, or simply the relative path to a Document
|
||||
List feed. The DocumentQuery object is useful when constructing
|
||||
query parameters.
|
||||
converter: func (optional) A function which will be executed on the
|
||||
retrieved item, generally to render it into a Python object.
|
||||
By default the DocumentListFeedFromString function is used to
|
||||
return a DocumentListFeed object. This is because most feed
|
||||
queries will result in a feed and not a single entry.
|
||||
"""
|
||||
return self.Get(uri, converter=converter)
|
||||
|
||||
def QueryDocumentListFeed(self, uri):
|
||||
"""Retrieves a DocumentListFeed by retrieving a URI based off the Document
|
||||
List feed, including any query parameters. A DocumentQuery object can
|
||||
be used to construct these parameters.
|
||||
|
||||
Args:
|
||||
uri: string The URI of the feed being retrieved possibly with query
|
||||
parameters.
|
||||
|
||||
Returns:
|
||||
A DocumentListFeed object representing the feed returned by the server.
|
||||
"""
|
||||
return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString)
|
||||
|
||||
def GetDocumentListEntry(self, uri):
|
||||
"""Retrieves a particular DocumentListEntry by its unique URI.
|
||||
|
||||
Args:
|
||||
uri: string The unique URI of an entry in a Document List feed.
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry object representing the retrieved entry.
|
||||
"""
|
||||
return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)
|
||||
|
||||
def GetDocumentListFeed(self, uri=None):
|
||||
"""Retrieves a feed containing all of a user's documents.
|
||||
|
||||
Args:
|
||||
uri: string A full URI to query the Document List feed.
|
||||
"""
|
||||
if not uri:
|
||||
uri = gdata.docs.service.DocumentQuery().ToUri()
|
||||
return self.QueryDocumentListFeed(uri)
|
||||
|
||||
def GetDocumentListAclEntry(self, uri):
|
||||
"""Retrieves a particular DocumentListAclEntry by its unique URI.
|
||||
|
||||
Args:
|
||||
uri: string The unique URI of an entry in a Document List feed.
|
||||
|
||||
Returns:
|
||||
A DocumentListAclEntry object representing the retrieved entry.
|
||||
"""
|
||||
return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString)
|
||||
|
||||
def GetDocumentListAclFeed(self, uri):
|
||||
"""Retrieves a feed containing all of a user's documents.
|
||||
|
||||
Args:
|
||||
uri: string The URI of a document's Acl feed to retrieve.
|
||||
|
||||
Returns:
|
||||
A DocumentListAclFeed object representing the ACL feed
|
||||
returned by the server.
|
||||
"""
|
||||
return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString)
|
||||
|
||||
def Upload(self, media_source, title, folder_or_uri=None, label=None):
|
||||
"""Uploads a document inside of a MediaSource object to the Document List
|
||||
feed with the given title.
|
||||
|
||||
Args:
|
||||
media_source: MediaSource The gdata.MediaSource object containing a
|
||||
document file to be uploaded.
|
||||
title: string The title of the document on the server after being
|
||||
uploaded.
|
||||
folder_or_uri: DocumentListEntry or string (optional) An object with a
|
||||
link to a folder or a uri to a folder to upload to.
|
||||
Note: A valid uri for a folder is of the form:
|
||||
/feeds/folders/private/full/folder%3Afolder_id
|
||||
label: optional label describing the type of the document to be created.
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry containing information about the document created
|
||||
on the Google Documents service.
|
||||
"""
|
||||
|
||||
return self._UploadFile(media_source, title, self._MakeKindCategory(label),
|
||||
folder_or_uri)
|
||||
|
||||
def Download(self, entry_or_id_or_url, file_path, export_format=None,
|
||||
gid=None, extra_params=None):
|
||||
"""Downloads a document from the Document List.
|
||||
|
||||
Args:
|
||||
entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
|
||||
or a url to download from (such as the content src).
|
||||
file_path: string The full path to save the file to.
|
||||
export_format: the format to convert to, if conversion is required.
|
||||
gid: grid id, for downloading a single grid of a spreadsheet
|
||||
extra_params: a map of any further parameters to control how the document
|
||||
is downloaded
|
||||
|
||||
Raises:
|
||||
RequestError if the service does not respond with success
|
||||
"""
|
||||
|
||||
if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry):
|
||||
url = entry_or_id_or_url.content.src
|
||||
else:
|
||||
if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
|
||||
url = self._MakeContentLinkFromId(entry_or_id_or_url)
|
||||
else:
|
||||
url = entry_or_id_or_url
|
||||
|
||||
if export_format is not None:
|
||||
if url.find('/Export?') == -1:
|
||||
raise gdata.service.Error, ('This entry cannot be exported '
|
||||
'as a different format')
|
||||
url += '&exportFormat=%s' % export_format
|
||||
|
||||
if gid is not None:
|
||||
if url.find('spreadsheets') == -1:
|
||||
raise gdata.service.Error, 'grid id param is not valid for this entry'
|
||||
url += '&gid=%s' % gid
|
||||
|
||||
if extra_params:
|
||||
url += '&' + urllib.urlencode(extra_params)
|
||||
|
||||
self._DownloadFile(url, file_path)
|
||||
|
||||
def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None):
|
||||
"""Downloads a document from the Document List in a different format.
|
||||
|
||||
Args:
|
||||
entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
|
||||
or a url to download from (such as the content src).
|
||||
file_path: string The full path to save the file to. The export
|
||||
format is inferred from the file extension.
|
||||
gid: grid id, for downloading a single grid of a spreadsheet
|
||||
extra_params: a map of any further parameters to control how the document
|
||||
is downloaded
|
||||
|
||||
Raises:
|
||||
RequestError if the service does not respond with success
|
||||
"""
|
||||
ext = None
|
||||
match = self.__FILE_EXT_PATTERN.match(file_path)
|
||||
if match:
|
||||
ext = match.group(1)
|
||||
self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params)
|
||||
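  # Editor's note (illustrative sketch, not in the original source): exporting
  # an entry in a format chosen purely by the destination file extension.
  #
  #   client.Export(entry, '/tmp/report.pdf')   # exportFormat inferred as 'pdf'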
|
||||
def CreateFolder(self, title, folder_or_uri=None):
|
||||
"""Creates a folder in the Document List feed.
|
||||
|
||||
Args:
|
||||
title: string The title of the folder on the server after being created.
|
||||
folder_or_uri: DocumentListEntry or string (optional) An object with a
|
||||
link to a folder or a uri to a folder to upload to.
|
||||
Note: A valid uri for a folder is of the form:
|
||||
/feeds/folders/private/full/folder%3Afolder_id
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry containing information about the folder created on
|
||||
the Google Documents service.
|
||||
"""
|
||||
if folder_or_uri:
|
||||
try:
|
||||
uri = folder_or_uri.content.src
|
||||
except AttributeError:
|
||||
uri = folder_or_uri
|
||||
else:
|
||||
uri = '/feeds/documents/private/full'
|
||||
|
||||
folder_entry = gdata.docs.DocumentListEntry()
|
||||
folder_entry.title = atom.Title(text=title)
|
||||
folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL))
|
||||
folder_entry = self.Post(folder_entry, uri,
|
||||
converter=gdata.docs.DocumentListEntryFromString)
|
||||
|
||||
return folder_entry
|
||||
|
||||
|
||||
def MoveOutOfFolder(self, source_entry):
|
||||
"""Moves a document into a folder in the Document List Feed.
|
||||
|
||||
Args:
|
||||
source_entry: DocumentListEntry An object representing the source
|
||||
document/folder.
|
||||
|
||||
Returns:
|
||||
True if the entry was moved out.
|
||||
"""
|
||||
return self.Delete(source_entry.GetEditLink().href)
|
||||
|
||||
# Deprecated methods
|
||||
|
||||
#@atom.deprecated('Please use Upload instead')
|
||||
def UploadPresentation(self, media_source, title, folder_or_uri=None):
|
||||
"""Uploads a presentation inside of a MediaSource object to the Document
|
||||
List feed with the given title.
|
||||
|
||||
This method is deprecated, use Upload instead.
|
||||
|
||||
Args:
|
||||
media_source: MediaSource The MediaSource object containing a
|
||||
presentation file to be uploaded.
|
||||
title: string The title of the presentation on the server after being
|
||||
uploaded.
|
||||
folder_or_uri: DocumentListEntry or string (optional) An object with a
|
||||
link to a folder or a uri to a folder to upload to.
|
||||
Note: A valid uri for a folder is of the form:
|
||||
/feeds/folders/private/full/folder%3Afolder_id
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry containing information about the presentation created
|
||||
on the Google Documents service.
|
||||
"""
|
||||
return self._UploadFile(
|
||||
media_source, title, self._MakeKindCategory(PRESENTATION_LABEL),
|
||||
folder_or_uri=folder_or_uri)
|
||||
|
||||
UploadPresentation = atom.deprecated('Please use Upload instead')(
|
||||
UploadPresentation)
|
||||
|
||||
#@atom.deprecated('Please use Upload instead')
|
||||
def UploadSpreadsheet(self, media_source, title, folder_or_uri=None):
|
||||
"""Uploads a spreadsheet inside of a MediaSource object to the Document
|
||||
List feed with the given title.
|
||||
|
||||
This method is deprecated, use Upload instead.
|
||||
|
||||
Args:
|
||||
media_source: MediaSource The MediaSource object containing a spreadsheet
|
||||
file to be uploaded.
|
||||
title: string The title of the spreadsheet on the server after being
|
||||
uploaded.
|
||||
folder_or_uri: DocumentListEntry or string (optional) An object with a
|
||||
link to a folder or a uri to a folder to upload to.
|
||||
Note: A valid uri for a folder is of the form:
|
||||
/feeds/folders/private/full/folder%3Afolder_id
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry containing information about the spreadsheet created
|
||||
on the Google Documents service.
|
||||
"""
|
||||
return self._UploadFile(
|
||||
media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL),
|
||||
folder_or_uri=folder_or_uri)
|
||||
|
||||
UploadSpreadsheet = atom.deprecated('Please use Upload instead')(
|
||||
UploadSpreadsheet)
|
||||
|
||||
#@atom.deprecated('Please use Upload instead')
|
||||
def UploadDocument(self, media_source, title, folder_or_uri=None):
|
||||
"""Uploads a document inside of a MediaSource object to the Document List
|
||||
feed with the given title.
|
||||
|
||||
This method is deprecated, use Upload instead.
|
||||
|
||||
Args:
|
||||
media_source: MediaSource The gdata.MediaSource object containing a
|
||||
document file to be uploaded.
|
||||
title: string The title of the document on the server after being
|
||||
uploaded.
|
||||
folder_or_uri: DocumentListEntry or string (optional) An object with a
|
||||
link to a folder or a uri to a folder to upload to.
|
||||
Note: A valid uri for a folder is of the form:
|
||||
/feeds/folders/private/full/folder%3Afolder_id
|
||||
|
||||
Returns:
|
||||
A DocumentListEntry containing information about the document created
|
||||
on the Google Documents service.
|
||||
"""
|
||||
return self._UploadFile(
|
||||
media_source, title, self._MakeKindCategory(DOCUMENT_LABEL),
|
||||
folder_or_uri=folder_or_uri)
|
||||
|
||||
UploadDocument = atom.deprecated('Please use Upload instead')(
|
||||
UploadDocument)
|
||||
|
||||
"""Calling any of these functions is the same as calling Export"""
|
||||
DownloadDocument = atom.deprecated('Please use Export instead')(Export)
|
||||
DownloadPresentation = atom.deprecated('Please use Export instead')(Export)
|
||||
DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export)
|
||||
|
||||
"""Calling any of these functions is the same as calling MoveIntoFolder"""
|
||||
MoveDocumentIntoFolder = atom.deprecated(
|
||||
'Please use MoveIntoFolder instead')(MoveIntoFolder)
|
||||
MovePresentationIntoFolder = atom.deprecated(
|
||||
'Please use MoveIntoFolder instead')(MoveIntoFolder)
|
||||
MoveSpreadsheetIntoFolder = atom.deprecated(
|
||||
'Please use MoveIntoFolder instead')(MoveIntoFolder)
|
||||
MoveFolderIntoFolder = atom.deprecated(
|
||||
'Please use MoveIntoFolder instead')(MoveIntoFolder)
|
||||
|
||||
|
||||
class DocumentQuery(gdata.service.Query):
|
||||
|
||||
"""Object used to construct a URI to query the Google Document List feed"""
|
||||
|
||||
def __init__(self, feed='/feeds/documents', visibility='private',
|
||||
projection='full', text_query=None, params=None,
|
||||
categories=None):
|
||||
"""Constructor for Document List Query
|
||||
|
||||
Args:
|
||||
feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
|
||||
visibility: string (optional) The visibility chosen for the current feed.
|
||||
projection: string (optional) The projection chosen for the current feed.
|
||||
text_query: string (optional) The contents of the q query parameter. This
|
||||
string is URL escaped upon conversion to a URI.
|
||||
params: dict (optional) Parameter value string pairs which become URL
|
||||
params when translated to a URI. These parameters are added to
|
||||
the query's items.
|
||||
categories: list (optional) List of category strings which should be
|
||||
included as query categories. See gdata.service.Query for
|
||||
additional documentation.
|
||||
|
||||
Yields:
|
||||
A DocumentQuery object used to construct a URI based on the Document
|
||||
List feed.
|
||||
"""
|
||||
self.visibility = visibility
|
||||
self.projection = projection
|
||||
gdata.service.Query.__init__(self, feed, text_query, params, categories)
|
||||
|
||||
def ToUri(self):
|
||||
"""Generates a URI from the query parameters set in the object.
|
||||
|
||||
Returns:
|
||||
A string containing the URI used to retrieve entries from the Document
|
||||
List feed.
|
||||
"""
|
||||
old_feed = self.feed
|
||||
self.feed = '/'.join([old_feed, self.visibility, self.projection])
|
||||
new_feed = gdata.service.Query.ToUri(self)
|
||||
self.feed = old_feed
|
||||
return new_feed
|
||||
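  # Editor's note (illustrative sketch, not in the original source): the
  # visibility and projection are only spliced into the path while the URI is
  # being generated.
  #
  #   q = DocumentQuery(categories=['document'])
  #   q['title'] = 'budget'
  #   q.ToUri()  # -> roughly '/feeds/documents/private/full/-/document?title=budget'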
|
||||
def AddNamedFolder(self, email, folder_name):
|
||||
"""Adds a named folder category, qualified by a schema.
|
||||
|
||||
This function lets you query for documents that are contained inside a
|
||||
named folder without fear of collision with other categories.
|
||||
|
||||
Args:
|
||||
email: string The email of the user who owns the folder.
|
||||
folder_name: string The name of the folder.
|
||||
|
||||
Returns:
|
||||
The string of the category that was added to the object.
|
||||
"""
|
||||
|
||||
category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
|
||||
self.categories.append(category)
|
||||
return category
|
||||
|
||||
def RemoveNamedFolder(self, email, folder_name):
|
||||
"""Removes a named folder category, qualified by a schema.
|
||||
|
||||
Args:
|
||||
email: string The email of the user who owns the folder.
|
||||
folder_name: string The name of the folder.
|
||||
|
||||
Returns:
|
||||
The string of the category that was removed from the object.
|
||||
"""
|
||||
category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
|
||||
self.categories.remove(category)
|
||||
return category
|
||||
|
||||
|
||||
class DocumentAclQuery(gdata.service.Query):
|
||||
|
||||
"""Object used to construct a URI to query a Document's ACL feed"""
|
||||
|
||||
def __init__(self, resource_id, feed='/feeds/acl/private/full'):
|
||||
"""Constructor for Document ACL Query
|
||||
|
||||
Args:
|
||||
resource_id: string The resource id. (e.g. 'document%3Adocument_id',
|
||||
'spreadsheet%3Aspreadsheet_id', etc.)
|
||||
feed: string (optional) The path for the feed.
|
||||
(e.g. '/feeds/acl/private/full')
|
||||
|
||||
Yields:
|
||||
A DocumentAclQuery object used to construct a URI based on the Document
|
||||
ACL feed.
|
||||
"""
|
||||
self.resource_id = resource_id
|
||||
gdata.service.Query.__init__(self, feed)
|
||||
|
||||
def ToUri(self):
|
||||
"""Generates a URI from the query parameters set in the object.
|
||||
|
||||
Returns:
|
||||
A string containing the URI used to retrieve entries from the Document
|
||||
ACL feed.
|
||||
"""
|
||||
return '%s/%s' % (gdata.service.Query.ToUri(self), self.resource_id)
|
||||
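  # Editor's note (illustrative sketch, not in the original source): building
  # the ACL feed URI for a document resource id.
  #
  #   q = DocumentAclQuery('document%3Adocument_id')
  #   q.ToUri()   # -> '/feeds/acl/private/full/document%3Adocument_id'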
15
python/gdata/dublincore/__init__.py
Normal file
15
python/gdata/dublincore/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
78
python/gdata/dublincore/data.py
Normal file
78
python/gdata/dublincore/data.py
Normal file
@@ -0,0 +1,78 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains the data classes of the Dublin Core Metadata Initiative (DCMI) Extension"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
|
||||
|
||||
DC_TEMPLATE = '{http://purl.org/dc/terms/}%s'
|
||||
|
||||
|
||||
class Creator(atom.core.XmlElement):
|
||||
"""Entity primarily responsible for making the resource."""
|
||||
_qname = DC_TEMPLATE % 'creator'
|
||||
|
||||
|
||||
class Date(atom.core.XmlElement):
|
||||
"""Point or period of time associated with an event in the lifecycle of the resource."""
|
||||
_qname = DC_TEMPLATE % 'date'
|
||||
|
||||
|
||||
class Description(atom.core.XmlElement):
|
||||
"""Account of the resource."""
|
||||
_qname = DC_TEMPLATE % 'description'
|
||||
|
||||
|
||||
class Format(atom.core.XmlElement):
|
||||
"""File format, physical medium, or dimensions of the resource."""
|
||||
_qname = DC_TEMPLATE % 'format'
|
||||
|
||||
|
||||
class Identifier(atom.core.XmlElement):
|
||||
"""An unambiguous reference to the resource within a given context."""
|
||||
_qname = DC_TEMPLATE % 'identifier'
|
||||
|
||||
|
||||
class Language(atom.core.XmlElement):
|
||||
"""Language of the resource."""
|
||||
_qname = DC_TEMPLATE % 'language'
|
||||
|
||||
|
||||
class Publisher(atom.core.XmlElement):
|
||||
"""Entity responsible for making the resource available."""
|
||||
_qname = DC_TEMPLATE % 'publisher'
|
||||
|
||||
|
||||
class Rights(atom.core.XmlElement):
|
||||
"""Information about rights held in and over the resource."""
|
||||
_qname = DC_TEMPLATE % 'rights'
|
||||
|
||||
|
||||
class Subject(atom.core.XmlElement):
|
||||
"""Topic of the resource."""
|
||||
_qname = DC_TEMPLATE % 'subject'
|
||||
|
||||
|
||||
class Title(atom.core.XmlElement):
|
||||
"""Name given to the resource."""
|
||||
_qname = DC_TEMPLATE % 'title'
|
||||
|
||||
|
||||
217
python/gdata/exif/__init__.py
Normal file
217
python/gdata/exif/__init__.py
Normal file
@@ -0,0 +1,217 @@
|
||||
# -*-*- encoding: utf-8 -*-*-
|
||||
#
|
||||
# This is gdata.photos.exif, implementing the exif namespace in gdata
|
||||
#
|
||||
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
|
||||
#
|
||||
# Copyright 2007 Håvard Gulldahl
|
||||
# Portions copyright 2007 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""This module maps elements from the {EXIF} namespace[1] to GData objects.
|
||||
These elements describe image data, using exif attributes[2].
|
||||
|
||||
Picasa Web Albums uses the exif namespace to represent Exif data encoded
|
||||
in a photo [3].
|
||||
|
||||
Picasa Web Albums uses the following exif elements:
|
||||
exif:distance
|
||||
exif:exposure
|
||||
exif:flash
|
||||
exif:focallength
|
||||
exif:fstop
|
||||
exif:imageUniqueID
|
||||
exif:iso
|
||||
exif:make
|
||||
exif:model
|
||||
exif:tags
|
||||
exif:time
|
||||
|
||||
[1]: http://schemas.google.com/photos/exif/2007.
|
||||
[2]: http://en.wikipedia.org/wiki/Exif
|
||||
[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference
|
||||
"""
|
||||
|
||||
|
||||
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
|
||||
__license__ = 'Apache License v2'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
|
||||
|
||||
class ExifBaseElement(atom.AtomBase):
|
||||
"""Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag
|
||||
""" % EXIF_NAMESPACE
|
||||
|
||||
_tag = ''
|
||||
_namespace = EXIF_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
def __init__(self, name=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.name = name
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
class Distance(ExifBaseElement):
|
||||
"(float) The distance to the subject, e.g. 0.0"
|
||||
|
||||
_tag = 'distance'
|
||||
def DistanceFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Distance, xml_string)
|
||||
|
||||
class Exposure(ExifBaseElement):
|
||||
"(float) The exposure time used, e.g. 0.025 or 8.0E4"
|
||||
|
||||
_tag = 'exposure'
|
||||
def ExposureFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Exposure, xml_string)
|
||||
|
||||
class Flash(ExifBaseElement):
|
||||
"""(string) Boolean value indicating whether the flash was used.
|
||||
The .text attribute will either be `true' or `false'
|
||||
|
||||
As a convenience, truth-testing an instance will return what you want,
|
||||
so you can say:
|
||||
|
||||
flash_used = bool(flash)
|
||||
|
||||
"""
|
||||
|
||||
_tag = 'flash'
|
||||
def __nonzero__(self):  # Python 2 truth-value hook; bool() does not call __bool__ in Python 2
|
||||
if self.text.lower() in ('true','false'):
|
||||
return self.text.lower() == 'true'
|
||||
def FlashFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Flash, xml_string)
|
||||
|
||||
class Focallength(ExifBaseElement):
|
||||
"(float) The focal length used, e.g. 23.7"
|
||||
|
||||
_tag = 'focallength'
|
||||
def FocallengthFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Focallength, xml_string)
|
||||
|
||||
class Fstop(ExifBaseElement):
|
||||
"(float) The fstop value used, e.g. 5.0"
|
||||
|
||||
_tag = 'fstop'
|
||||
def FstopFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Fstop, xml_string)
|
||||
|
||||
class ImageUniqueID(ExifBaseElement):
|
||||
"(string) The unique image ID for the photo. Generated by Google Photo servers"
|
||||
|
||||
_tag = 'imageUniqueID'
|
||||
def ImageUniqueIDFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(ImageUniqueID, xml_string)
|
||||
|
||||
class Iso(ExifBaseElement):
|
||||
"(int) The iso equivalent value used, e.g. 200"
|
||||
|
||||
_tag = 'iso'
|
||||
def IsoFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Iso, xml_string)
|
||||
|
||||
class Make(ExifBaseElement):
|
||||
"(string) The make of the camera used, e.g. Fictitious Camera Company"
|
||||
|
||||
_tag = 'make'
|
||||
def MakeFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Make, xml_string)
|
||||
|
||||
class Model(ExifBaseElement):
|
||||
"(string) The model of the camera used,e.g AMAZING-100D"
|
||||
|
||||
_tag = 'model'
|
||||
def ModelFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Model, xml_string)
|
||||
|
||||
class Time(ExifBaseElement):
|
||||
"""(int) The date/time the photo was taken, e.g. 1180294337000.
|
||||
Represented as the number of milliseconds since January 1st, 1970.
|
||||
|
||||
The value of this element will always be identical to the value
|
||||
of the <gphoto:timestamp>.
|
||||
|
||||
Look at this object's .isoformat() for a human friendly datetime string:
|
||||
|
||||
photo_epoch = Time.text # 1180294337000
|
||||
photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'
|
||||
|
||||
Alternatively:
|
||||
photo_datetime = Time.datetime() # (requires python >= 2.3)
|
||||
"""
|
||||
|
||||
_tag = 'time'
|
||||
def isoformat(self):
|
||||
"""(string) Return the timestamp as a ISO 8601 formatted string,
|
||||
e.g. '2007-05-27T19:32:17.000Z'
|
||||
"""
|
||||
import time
|
||||
epoch = float(self.text)/1000
|
||||
return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
|
||||
|
||||
def datetime(self):
|
||||
"""(datetime.datetime) Return the timestamp as a datetime.datetime object
|
||||
|
||||
Requires python 2.3
|
||||
"""
|
||||
import datetime
|
||||
epoch = float(self.text)/1000
|
||||
return datetime.datetime.fromtimestamp(epoch)
|
||||
|
||||
def TimeFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Time, xml_string)
|
||||
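# Editor's sketch (not part of the original module): the millisecond timestamp
# carried in <exif:time> converts to the ISO string quoted in the Time docstring.
def _example_time_conversion():
  import time
  epoch = 1180294337000 / 1000.0
  # Matches the example in the Time docstring: '2007-05-27T19:32:17.000Z'.
  return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))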
|
||||
class Tags(ExifBaseElement):
|
||||
"""The container for all exif elements.
|
||||
The <exif:tags> element can appear as a child of a photo entry.
|
||||
"""
|
||||
|
||||
_tag = 'tags'
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop)
|
||||
_children['{%s}make' % EXIF_NAMESPACE] = ('make', Make)
|
||||
_children['{%s}model' % EXIF_NAMESPACE] = ('model', Model)
|
||||
_children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance)
|
||||
_children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure)
|
||||
_children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash)
|
||||
_children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength)
|
||||
_children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso)
|
||||
_children['{%s}time' % EXIF_NAMESPACE] = ('time', Time)
|
||||
_children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID)
|
||||
|
||||
def __init__(self, extension_elements=None, extension_attributes=None, text=None):
|
||||
ExifBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
self.fstop=None
|
||||
self.make=None
|
||||
self.model=None
|
||||
self.distance=None
|
||||
self.exposure=None
|
||||
self.flash=None
|
||||
self.focallength=None
|
||||
self.iso=None
|
||||
self.time=None
|
||||
self.imageUniqueID=None
|
||||
def TagsFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Tags, xml_string)
|
||||
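# Editor's sketch (not part of the original module, a hedged example): parsing
# an <exif:tags> blob with the TagsFromString helper defined above.
def _example_parse_tags():
  xml = ('<exif:tags xmlns:exif="%s">'
         '<exif:fstop>5.0</exif:fstop><exif:model>AMAZING-100D</exif:model>'
         '</exif:tags>' % EXIF_NAMESPACE)
  tags = TagsFromString(xml)
  return tags.fstop.text, tags.model.text  # ('5.0', 'AMAZING-100D')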
|
||||
486
python/gdata/finance/__init__.py
Normal file
486
python/gdata/finance/__init__.py
Normal file
@@ -0,0 +1,486 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Tan Swee Heng
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Contains extensions to Atom objects used with Google Finance."""
|
||||
|
||||
|
||||
__author__ = 'thesweeheng@gmail.com'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
GD_NAMESPACE = 'http://schemas.google.com/g/2005'
|
||||
GF_NAMESPACE = 'http://schemas.google.com/finance/2007'
|
||||
|
||||
|
||||
class Money(atom.AtomBase):
|
||||
"""The <gd:money> element."""
|
||||
_tag = 'money'
|
||||
_namespace = GD_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['amount'] = 'amount'
|
||||
_attributes['currencyCode'] = 'currency_code'
|
||||
|
||||
def __init__(self, amount=None, currency_code=None, **kwargs):
|
||||
self.amount = amount
|
||||
self.currency_code = currency_code
|
||||
atom.AtomBase.__init__(self, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return "%s %s" % (self.amount, self.currency_code)
|
||||
|
||||
|
||||
def MoneyFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Money, xml_string)
|
||||
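# Editor's sketch (not part of the original module): constructing and parsing
# a <gd:money> element.
def _example_money():
  cash = Money(amount='42.00', currency_code='USD')
  assert str(cash) == '42.00 USD'
  parsed = MoneyFromString(
      '<gd:money xmlns:gd="%s" amount="42.00" currencyCode="USD"/>'
      % GD_NAMESPACE)
  return parsed.amount, parsed.currency_code  # ('42.00', 'USD')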
|
||||
|
||||
class _Monies(atom.AtomBase):
|
||||
"""An element containing multiple <gd:money> in multiple currencies."""
|
||||
_namespace = GF_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}money' % GD_NAMESPACE] = ('money', [Money])
|
||||
|
||||
def __init__(self, money=None, **kwargs):
|
||||
self.money = money or []
|
||||
atom.AtomBase.__init__(self, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return " / ".join(["%s" % i for i in self.money])
|
||||
|
||||
|
||||
class CostBasis(_Monies):
|
||||
"""The <gf:costBasis> element."""
|
||||
_tag = 'costBasis'
|
||||
|
||||
|
||||
def CostBasisFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(CostBasis, xml_string)
|
||||
|
||||
|
||||
class DaysGain(_Monies):
|
||||
"""The <gf:daysGain> element."""
|
||||
_tag = 'daysGain'
|
||||
|
||||
|
||||
def DaysGainFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(DaysGain, xml_string)
|
||||
|
||||
|
||||
class Gain(_Monies):
|
||||
"""The <gf:gain> element."""
|
||||
_tag = 'gain'
|
||||
|
||||
|
||||
def GainFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Gain, xml_string)
|
||||
|
||||
|
||||
class MarketValue(_Monies):
|
||||
"""The <gf:marketValue> element."""
|
||||
_tag = 'marketValue'
|
||||
|
||||
|
||||
def MarketValueFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(MarketValue, xml_string)
|
||||
|
||||
|
||||
class Commission(_Monies):
|
||||
"""The <gf:commission> element."""
|
||||
_tag = 'commission'
|
||||
|
||||
|
||||
def CommissionFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Commission, xml_string)
|
||||
|
||||
|
||||
class Price(_Monies):
|
||||
"""The <gf:price> element."""
|
||||
_tag = 'price'
|
||||
|
||||
|
||||
def PriceFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Price, xml_string)
|
||||
|
||||
|
||||
class Symbol(atom.AtomBase):
|
||||
"""The <gf:symbol> element."""
|
||||
_tag = 'symbol'
|
||||
_namespace = GF_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['fullName'] = 'full_name'
|
||||
_attributes['exchange'] = 'exchange'
|
||||
_attributes['symbol'] = 'symbol'
|
||||
|
||||
def __init__(self, full_name=None, exchange=None, symbol=None, **kwargs):
|
||||
self.full_name = full_name
|
||||
self.exchange = exchange
|
||||
self.symbol = symbol
|
||||
atom.AtomBase.__init__(self, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return "%s:%s (%s)" % (self.exchange, self.symbol, self.full_name)
|
||||
|
||||
|
||||
def SymbolFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Symbol, xml_string)
|
||||
|
||||
|
||||
class TransactionData(atom.AtomBase):
|
||||
"""The <gf:transactionData> element."""
|
||||
_tag = 'transactionData'
|
||||
_namespace = GF_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['type'] = 'type'
|
||||
_attributes['date'] = 'date'
|
||||
_attributes['shares'] = 'shares'
|
||||
_attributes['notes'] = 'notes'
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}commission' % GF_NAMESPACE] = ('commission', Commission)
|
||||
_children['{%s}price' % GF_NAMESPACE] = ('price', Price)
|
||||
|
||||
def __init__(self, type=None, date=None, shares=None,
|
||||
notes=None, commission=None, price=None, **kwargs):
|
||||
self.type = type
|
||||
self.date = date
|
||||
self.shares = shares
|
||||
self.notes = notes
|
||||
self.commission = commission
|
||||
self.price = price
|
||||
atom.AtomBase.__init__(self, **kwargs)
|
||||
|
||||
|
||||
def TransactionDataFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(TransactionData, xml_string)
|
||||
|
||||
|
||||
class TransactionEntry(gdata.GDataEntry):
|
||||
"""An entry of the transaction feed.
|
||||
|
||||
A TransactionEntry contains TransactionData such as the transaction
|
||||
type (Buy, Sell, Sell Short, or Buy to Cover), the number of units,
|
||||
the date, the price, any commission, and any notes.
|
||||
"""
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_children['{%s}transactionData' % GF_NAMESPACE] = (
|
||||
'transaction_data', TransactionData)
|
||||
|
||||
def __init__(self, transaction_data=None, **kwargs):
|
||||
self.transaction_data = transaction_data
|
||||
gdata.GDataEntry.__init__(self, **kwargs)
|
||||
|
||||
def transaction_id(self):
|
||||
return self.id.text.split("/")[-1]
|
||||
|
||||
transaction_id = property(transaction_id, doc='The transaction ID.')
|
||||
|
||||
|
||||
def TransactionEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(TransactionEntry, xml_string)
|
||||
|
||||
|
||||
class TransactionFeed(gdata.GDataFeed):
|
||||
"""A feed that lists all of the transactions that have been recorded for
|
||||
a particular position.
|
||||
|
||||
A transaction is a collection of information about an instance of
|
||||
buying or selling a particular security. The TransactionFeed lists all
|
||||
of the transactions that have been recorded for a particular position
|
||||
as a list of TransactionEntries.
|
||||
"""
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [TransactionEntry])
|
||||
|
||||
|
||||
def TransactionFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(TransactionFeed, xml_string)
|
||||
|
||||
|
||||
class TransactionFeedLink(atom.AtomBase):
|
||||
"""Link to TransactionFeed embedded in PositionEntry.
|
||||
|
||||
If a PositionFeed is queried with transactions='true', TransactionFeeds
|
||||
are inlined in the returned PositionEntries. These TransactionFeeds are
|
||||
accessible via TransactionFeedLink's feed attribute.
|
||||
"""
|
||||
_tag = 'feedLink'
|
||||
_namespace = GD_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['href'] = 'href'
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}feed' % atom.ATOM_NAMESPACE] = (
|
||||
'feed', TransactionFeed)
|
||||
|
||||
def __init__(self, href=None, feed=None, **kwargs):
|
||||
self.href = href
|
||||
self.feed = feed
|
||||
atom.AtomBase.__init__(self, **kwargs)
|
||||
|
||||
|
||||
class PositionData(atom.AtomBase):
|
||||
"""The <gf:positionData> element."""
|
||||
_tag = 'positionData'
|
||||
_namespace = GF_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['gainPercentage'] = 'gain_percentage'
|
||||
_attributes['return1w'] = 'return1w'
|
||||
_attributes['return4w'] = 'return4w'
|
||||
_attributes['return3m'] = 'return3m'
|
||||
_attributes['returnYTD'] = 'returnYTD'
|
||||
_attributes['return1y'] = 'return1y'
|
||||
_attributes['return3y'] = 'return3y'
|
||||
_attributes['return5y'] = 'return5y'
|
||||
_attributes['returnOverall'] = 'return_overall'
|
||||
_attributes['shares'] = 'shares'
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis)
|
||||
_children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain)
|
||||
_children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain)
|
||||
_children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue)
|
||||
|
||||
def __init__(self, gain_percentage=None,
|
||||
return1w=None, return4w=None, return3m=None, returnYTD=None,
|
||||
return1y=None, return3y=None, return5y=None, return_overall=None,
|
||||
shares=None, cost_basis=None, days_gain=None,
|
||||
gain=None, market_value=None, **kwargs):
|
||||
self.gain_percentage = gain_percentage
|
||||
self.return1w = return1w
|
||||
self.return4w = return4w
|
||||
self.return3m = return3m
|
||||
self.returnYTD = returnYTD
|
||||
self.return1y = return1y
|
||||
self.return3y = return3y
|
||||
self.return5y = return5y
|
||||
self.return_overall = return_overall
|
||||
self.shares = shares
|
||||
self.cost_basis = cost_basis
|
||||
self.days_gain = days_gain
|
||||
self.gain = gain
|
||||
self.market_value = market_value
|
||||
atom.AtomBase.__init__(self, **kwargs)
|
||||
|
||||
|
||||
def PositionDataFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(PositionData, xml_string)
|
||||
|
||||
|
||||
class PositionEntry(gdata.GDataEntry):
|
||||
"""An entry of the position feed.
|
||||
|
||||
A PositionEntry contains the ticker exchange and Symbol for a stock,
|
||||
mutual fund, or other security, along with PositionData such as the
|
||||
number of units of that security that the user holds, and performance
|
||||
statistics.
|
||||
"""
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_children['{%s}positionData' % GF_NAMESPACE] = (
|
||||
'position_data', PositionData)
|
||||
_children['{%s}symbol' % GF_NAMESPACE] = ('symbol', Symbol)
|
||||
_children['{%s}feedLink' % GD_NAMESPACE] = (
|
||||
'feed_link', TransactionFeedLink)
|
||||
|
||||
def __init__(self, position_data=None, symbol=None, feed_link=None,
|
||||
**kwargs):
|
||||
self.position_data = position_data
|
||||
self.symbol = symbol
|
||||
self.feed_link = feed_link
|
||||
gdata.GDataEntry.__init__(self, **kwargs)
|
||||
|
||||
def position_title(self):
|
||||
return self.title.text
|
||||
|
||||
position_title = property(position_title,
|
||||
doc='The position title as a string (i.e. position.title.text).')
|
||||
|
||||
def ticker_id(self):
|
||||
return self.id.text.split("/")[-1]
|
||||
|
||||
ticker_id = property(ticker_id, doc='The position TICKER ID.')
|
||||
|
||||
def transactions(self):
|
||||
if self.feed_link.feed:
|
||||
return self.feed_link.feed.entry
|
||||
else:
|
||||
return None
|
||||
|
||||
transactions = property(transactions, doc="""
|
||||
Inlined TransactionEntries are returned if PositionFeed is queried
|
||||
with transactions='true'.""")
|
||||
|
||||
|
||||
def PositionEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(PositionEntry, xml_string)
|
||||
|
||||
|
||||
class PositionFeed(gdata.GDataFeed):
|
||||
"""A feed that lists all of the positions in a particular portfolio.
|
||||
|
||||
A position is a collection of information about a security that the
|
||||
user holds. The PositionFeed lists all of the positions in a particular
|
||||
portfolio as a list of PositionEntries.
|
||||
"""
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PositionEntry])
|
||||
|
||||
|
||||
def PositionFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(PositionFeed, xml_string)
|
||||
|
||||
|
||||
class PositionFeedLink(atom.AtomBase):
|
||||
"""Link to PositionFeed embedded in PortfolioEntry.
|
||||
|
||||
If a PortfolioFeed is queried with positions='true', the PositionFeeds
|
||||
are inlined in the returned PortfolioEntries. These PositionFeeds are
|
||||
accessible via PositionFeedLink's feed attribute.
|
||||
"""
|
||||
_tag = 'feedLink'
|
||||
_namespace = GD_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['href'] = 'href'
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}feed' % atom.ATOM_NAMESPACE] = (
|
||||
'feed', PositionFeed)
|
||||
|
||||
def __init__(self, href=None, feed=None, **kwargs):
|
||||
self.href = href
|
||||
self.feed = feed
|
||||
atom.AtomBase.__init__(self, **kwargs)
|
||||
|
||||
|
||||
class PortfolioData(atom.AtomBase):
|
||||
"""The <gf:portfolioData> element."""
|
||||
_tag = 'portfolioData'
|
||||
_namespace = GF_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['currencyCode'] = 'currency_code'
|
||||
_attributes['gainPercentage'] = 'gain_percentage'
|
||||
_attributes['return1w'] = 'return1w'
|
||||
_attributes['return4w'] = 'return4w'
|
||||
_attributes['return3m'] = 'return3m'
|
||||
_attributes['returnYTD'] = 'returnYTD'
|
||||
_attributes['return1y'] = 'return1y'
|
||||
_attributes['return3y'] = 'return3y'
|
||||
_attributes['return5y'] = 'return5y'
|
||||
_attributes['returnOverall'] = 'return_overall'
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis)
|
||||
_children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain)
|
||||
_children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain)
|
||||
_children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue)
|
||||
|
||||
def __init__(self, currency_code=None, gain_percentage=None,
|
||||
return1w=None, return4w=None, return3m=None, returnYTD=None,
|
||||
return1y=None, return3y=None, return5y=None, return_overall=None,
|
||||
cost_basis=None, days_gain=None, gain=None, market_value=None, **kwargs):
|
||||
self.currency_code = currency_code
|
||||
self.gain_percentage = gain_percentage
|
||||
self.return1w = return1w
|
||||
self.return4w = return4w
|
||||
self.return3m = return3m
|
||||
self.returnYTD = returnYTD
|
||||
self.return1y = return1y
|
||||
self.return3y = return3y
|
||||
self.return5y = return5y
|
||||
self.return_overall = return_overall
|
||||
self.cost_basis = cost_basis
|
||||
self.days_gain = days_gain
|
||||
self.gain = gain
|
||||
self.market_value = market_value
|
||||
atom.AtomBase.__init__(self, **kwargs)
|
||||
|
||||
|
||||
def PortfolioDataFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(PortfolioData, xml_string)
|
||||
|
||||
|
||||
class PortfolioEntry(gdata.GDataEntry):
|
||||
"""An entry of the PortfolioFeed.
|
||||
|
||||
A PortfolioEntry contains the portfolio's title along with PortfolioData
|
||||
such as currency, total market value, and overall performance statistics.
|
||||
"""
|
||||
_tag = 'entry'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_children['{%s}portfolioData' % GF_NAMESPACE] = (
|
||||
'portfolio_data', PortfolioData)
|
||||
_children['{%s}feedLink' % GD_NAMESPACE] = (
|
||||
'feed_link', PositionFeedLink)
|
||||
|
||||
def __init__(self, portfolio_data=None, feed_link=None, **kwargs):
|
||||
self.portfolio_data = portfolio_data
|
||||
self.feed_link = feed_link
|
||||
gdata.GDataEntry.__init__(self, **kwargs)
|
||||
|
||||
def portfolio_title(self):
|
||||
return self.title.text
|
||||
|
||||
def set_portfolio_title(self, portfolio_title):
|
||||
self.title = atom.Title(text=portfolio_title, title_type='text')
|
||||
|
||||
portfolio_title = property(portfolio_title, set_portfolio_title,
|
||||
doc='The portfolio title as a string (i.e. portfolio.title.text).')
|
||||
|
||||
def portfolio_id(self):
|
||||
return self.id.text.split("/")[-1]
|
||||
|
||||
portfolio_id = property(portfolio_id,
|
||||
doc='The portfolio ID. Do not confuse with portfolio.id.')
|
||||
|
||||
def positions(self):
|
||||
if self.feed_link.feed:
|
||||
return self.feed_link.feed.entry
|
||||
else:
|
||||
return None
|
||||
|
||||
positions = property(positions, doc="""
|
||||
Inlined PositionEntries are returned if PortfolioFeed was queried
|
||||
with positions='true'.""")
|
||||
|
||||
|
||||
def PortfolioEntryFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(PortfolioEntry, xml_string)
|
||||
|
||||
|
||||
class PortfolioFeed(gdata.GDataFeed):
|
||||
"""A feed that lists all of the user's portfolios.
|
||||
|
||||
A portfolio is a collection of positions that the user holds in various
|
||||
securities, plus metadata. The PortfolioFeed lists all of the user's
|
||||
portfolios as a list of PortfolioEntries.
|
||||
"""
|
||||
_tag = 'feed'
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PortfolioEntry])
|
||||
|
||||
|
||||
def PortfolioFeedFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(PortfolioFeed, xml_string)
|
||||
|
||||
|
||||
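A brief usage sketch for the legacy classes above (not part of the patch; the XML string stands in for a feed fetched elsewhere, e.g. with FinanceService.GetPortfolioFeed from service.py below):

import gdata.finance

xml_string = '...'  # placeholder: Atom XML for a portfolio feed fetched with positions='true'

feed = gdata.finance.PortfolioFeedFromString(xml_string)
for portfolio in feed.entry:
  print portfolio.portfolio_title, portfolio.portfolio_id
  # positions is only populated when the feed was requested with positions='true'.
  for position in portfolio.positions or []:
    print '  ', position.ticker_id, position.position_data.shares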
156
python/gdata/finance/data.py
Normal file
@@ -0,0 +1,156 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Contains the data classes of the Google Finance Portfolio Data API"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
import atom.data
|
||||
import gdata.data
|
||||
import gdata.opensearch.data
|
||||
|
||||
|
||||
GF_TEMPLATE = '{http://schemas.google.com/finance/2007/}%s'
|
||||
|
||||
|
||||
class Commission(atom.core.XmlElement):
|
||||
"""Commission for the transaction"""
|
||||
_qname = GF_TEMPLATE % 'commission'
|
||||
money = [gdata.data.Money]
|
||||
|
||||
|
||||
class CostBasis(atom.core.XmlElement):
|
||||
"""Cost basis for the portfolio or position"""
|
||||
_qname = GF_TEMPLATE % 'costBasis'
|
||||
money = [gdata.data.Money]
|
||||
|
||||
|
||||
class DaysGain(atom.core.XmlElement):
|
||||
"""Today's gain for the portfolio or position"""
|
||||
_qname = GF_TEMPLATE % 'daysGain'
|
||||
money = [gdata.data.Money]
|
||||
|
||||
|
||||
class Gain(atom.core.XmlElement):
|
||||
"""Total gain for the portfolio or position"""
|
||||
_qname = GF_TEMPLATE % 'gain'
|
||||
money = [gdata.data.Money]
|
||||
|
||||
|
||||
class MarketValue(atom.core.XmlElement):
|
||||
"""Market value for the portfolio or position"""
|
||||
_qname = GF_TEMPLATE % 'marketValue'
|
||||
money = [gdata.data.Money]
|
||||
|
||||
|
||||
class PortfolioData(atom.core.XmlElement):
|
||||
"""Data for the portfolio"""
|
||||
_qname = GF_TEMPLATE % 'portfolioData'
|
||||
return_overall = 'returnOverall'
|
||||
currency_code = 'currencyCode'
|
||||
return3y = 'return3y'
|
||||
return4w = 'return4w'
|
||||
market_value = MarketValue
|
||||
return_y_t_d = 'returnYTD'
|
||||
cost_basis = CostBasis
|
||||
gain_percentage = 'gainPercentage'
|
||||
days_gain = DaysGain
|
||||
return3m = 'return3m'
|
||||
return5y = 'return5y'
|
||||
return1w = 'return1w'
|
||||
gain = Gain
|
||||
return1y = 'return1y'
|
||||
|
||||
|
||||
class PortfolioEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry in a feed of Finance portfolios"""
|
||||
portfolio_data = PortfolioData
|
||||
|
||||
|
||||
class PortfolioFeed(gdata.data.GDFeed):
|
||||
"""Describes a Finance portfolio feed"""
|
||||
entry = [PortfolioEntry]
|
||||
|
||||
|
||||
class PositionData(atom.core.XmlElement):
|
||||
"""Data for the position"""
|
||||
_qname = GF_TEMPLATE % 'positionData'
|
||||
return_y_t_d = 'returnYTD'
|
||||
return5y = 'return5y'
|
||||
return_overall = 'returnOverall'
|
||||
cost_basis = CostBasis
|
||||
return3y = 'return3y'
|
||||
return1y = 'return1y'
|
||||
return4w = 'return4w'
|
||||
shares = 'shares'
|
||||
days_gain = DaysGain
|
||||
gain_percentage = 'gainPercentage'
|
||||
market_value = MarketValue
|
||||
gain = Gain
|
||||
return3m = 'return3m'
|
||||
return1w = 'return1w'
|
||||
|
||||
|
||||
class Price(atom.core.XmlElement):
|
||||
"""Price of the transaction"""
|
||||
_qname = GF_TEMPLATE % 'price'
|
||||
money = [gdata.data.Money]
|
||||
|
||||
|
||||
class Symbol(atom.core.XmlElement):
|
||||
"""Stock symbol for the company"""
|
||||
_qname = GF_TEMPLATE % 'symbol'
|
||||
symbol = 'symbol'
|
||||
exchange = 'exchange'
|
||||
full_name = 'fullName'
|
||||
|
||||
|
||||
class PositionEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry in a feed of Finance positions"""
|
||||
symbol = Symbol
|
||||
position_data = PositionData
|
||||
|
||||
|
||||
class PositionFeed(gdata.data.GDFeed):
|
||||
"""Describes a Finance position feed"""
|
||||
entry = [PositionEntry]
|
||||
|
||||
|
||||
class TransactionData(atom.core.XmlElement):
|
||||
"""Data for the transction"""
|
||||
_qname = GF_TEMPLATE % 'transactionData'
|
||||
shares = 'shares'
|
||||
notes = 'notes'
|
||||
date = 'date'
|
||||
type = 'type'
|
||||
commission = Commission
|
||||
price = Price
|
||||
|
||||
|
||||
class TransactionEntry(gdata.data.GDEntry):
|
||||
"""Describes an entry in a feed of Finance transactions"""
|
||||
transaction_data = TransactionData
|
||||
|
||||
|
||||
class TransactionFeed(gdata.data.GDFeed):
|
||||
"""Describes a Finance transaction feed"""
|
||||
entry = [TransactionEntry]
|
||||
|
||||
|
||||
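A minimal sketch of using these v2 data classes, assuming atom.core.parse is available as in the other v2 data modules; the XML string is a placeholder:

import atom.core
import gdata.finance.data

xml_string = '...'  # placeholder: Atom XML for a portfolio feed

feed = atom.core.parse(xml_string, gdata.finance.data.PortfolioFeed)
for entry in feed.entry:
  data = entry.portfolio_data
  if data is not None:
    print entry.title.text, data.currency_code, data.gain_percentage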
243
python/gdata/finance/service.py
Normal file
@@ -0,0 +1,243 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Tan Swee Heng
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Classes to interact with the Google Finance server."""
|
||||
|
||||
|
||||
__author__ = 'thesweeheng@gmail.com'
|
||||
|
||||
|
||||
import gdata.service
|
||||
import gdata.finance
|
||||
import atom
|
||||
|
||||
|
||||
class PortfolioQuery(gdata.service.Query):
|
||||
"""A query object for the list of a user's portfolios."""
|
||||
|
||||
def returns(self):
|
||||
return self.get('returns', False)
|
||||
|
||||
def set_returns(self, value):
|
||||
if value == 'true' or value is True:
|
||||
self['returns'] = 'true'
|
||||
|
||||
returns = property(returns, set_returns, doc="The returns query parameter")
|
||||
|
||||
def positions(self):
|
||||
return self.get('positions', False)
|
||||
|
||||
def set_positions(self, value):
|
||||
if value == 'true' or value is True:
|
||||
self['positions'] = 'true'
|
||||
|
||||
positions = property(positions, set_positions,
|
||||
doc="The positions query parameter")
|
||||
|
||||
|
||||
class PositionQuery(gdata.service.Query):
|
||||
"""A query object for the list of a user's positions in a portfolio."""
|
||||
|
||||
def returns(self):
|
||||
return self.get('returns', False)
|
||||
|
||||
def set_returns(self, value):
|
||||
if value == 'true' or value is True:
|
||||
self['returns'] = 'true'
|
||||
|
||||
returns = property(returns, set_returns,
|
||||
doc="The returns query parameter")
|
||||
|
||||
def transactions(self):
|
||||
return self.get('transactions', False)
|
||||
|
||||
def set_transactions(self, value):
|
||||
if value == 'true' or value is True:
|
||||
self['transactions'] = 'true'
|
||||
|
||||
transactions = property(transactions, set_transactions,
|
||||
doc="The transactions query parameter")
|
||||
|
||||
|
||||
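For illustration, the FinanceService methods below pass a plain dict of parameters to these query classes; a sketch of the resulting URI (parameter order may vary):

query = PortfolioQuery(feed='/finance/feeds/default/portfolios',
                       params={'returns': 'true', 'positions': 'true'})
print query.ToUri()
# -> /finance/feeds/default/portfolios?returns=true&positions=true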
class FinanceService(gdata.service.GDataService):
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
server='finance.google.com', **kwargs):
|
||||
"""Creates a client for the Finance service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened. Default value: 'finance.google.com'.
|
||||
**kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
gdata.service.GDataService.__init__(self,
|
||||
email=email, password=password, service='finance', server=server,
|
||||
**kwargs)
|
||||
|
||||
def GetPortfolioFeed(self, query=None):
|
||||
uri = '/finance/feeds/default/portfolios'
|
||||
if query:
|
||||
uri = PortfolioQuery(feed=uri, params=query).ToUri()
|
||||
return self.Get(uri, converter=gdata.finance.PortfolioFeedFromString)
|
||||
|
||||
def GetPositionFeed(self, portfolio_entry=None, portfolio_id=None,
|
||||
query=None):
|
||||
"""
|
||||
Args:
|
||||
portfolio_entry: PortfolioEntry (optional; see Notes)
|
||||
portfolio_id: string (optional; see Notes) This may be obtained
|
||||
from a PortfolioEntry's portfolio_id attribute.
|
||||
query: PortfolioQuery (optional)
|
||||
|
||||
Notes:
|
||||
Either a PortfolioEntry OR a portfolio ID must be provided.
|
||||
"""
|
||||
if portfolio_entry:
|
||||
uri = portfolio_entry.GetSelfLink().href + '/positions'
|
||||
elif portfolio_id:
|
||||
uri = '/finance/feeds/default/portfolios/%s/positions' % portfolio_id
|
||||
if query:
|
||||
uri = PositionQuery(feed=uri, params=query).ToUri()
|
||||
return self.Get(uri, converter=gdata.finance.PositionFeedFromString)
|
||||
|
||||
def GetTransactionFeed(self, position_entry=None,
|
||||
portfolio_id=None, ticker_id=None):
|
||||
"""
|
||||
Args:
|
||||
position_entry: PositionEntry (optional; see Notes)
|
||||
portfolio_id: string (optional; see Notes) This may be obtained
|
||||
from a PortfolioEntry's portfolio_id attribute.
|
||||
ticker_id: string (optional; see Notes) This may be obtained from
|
||||
a PositionEntry's ticker_id attribute. Alternatively it can
|
||||
be constructed using the security's exchange and symbol,
|
||||
e.g. 'NASDAQ:GOOG'
|
||||
|
||||
Notes:
|
||||
Either a PositionEntry OR (a portfolio ID AND ticker ID) must
|
||||
be provided.
|
||||
"""
|
||||
if position_entry:
|
||||
uri = position_entry.GetSelfLink().href + '/transactions'
|
||||
elif portfolio_id and ticker_id:
|
||||
uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \
|
||||
% (portfolio_id, ticker_id)
|
||||
return self.Get(uri, converter=gdata.finance.TransactionFeedFromString)
|
||||
|
||||
def GetPortfolio(self, portfolio_id=None, query=None):
|
||||
uri = '/finance/feeds/default/portfolios/%s' % portfolio_id
|
||||
if query:
|
||||
uri = PortfolioQuery(feed=uri, params=query).ToUri()
|
||||
return self.Get(uri, converter=gdata.finance.PortfolioEntryFromString)
|
||||
|
||||
def AddPortfolio(self, portfolio_entry=None):
|
||||
uri = '/finance/feeds/default/portfolios'
|
||||
return self.Post(portfolio_entry, uri,
|
||||
converter=gdata.finance.PortfolioEntryFromString)
|
||||
|
||||
def UpdatePortfolio(self, portfolio_entry=None):
|
||||
uri = portfolio_entry.GetEditLink().href
|
||||
return self.Put(portfolio_entry, uri,
|
||||
converter=gdata.finance.PortfolioEntryFromString)
|
||||
|
||||
def DeletePortfolio(self, portfolio_entry=None):
|
||||
uri = portfolio_entry.GetEditLink().href
|
||||
return self.Delete(uri)
|
||||
|
||||
def GetPosition(self, portfolio_id=None, ticker_id=None, query=None):
|
||||
uri = '/finance/feeds/default/portfolios/%s/positions/%s' \
|
||||
% (portfolio_id, ticker_id)
|
||||
if query:
|
||||
uri = PositionQuery(feed=uri, params=query).ToUri()
|
||||
return self.Get(uri, converter=gdata.finance.PositionEntryFromString)
|
||||
|
||||
def DeletePosition(self, position_entry=None,
|
||||
portfolio_id=None, ticker_id=None, transaction_feed=None):
|
||||
"""A position is deleted by deleting all its transactions.
|
||||
|
||||
Args:
|
||||
position_entry: PositionEntry (optional; see Notes)
|
||||
portfolio_id: string (optional; see Notes) This may be obtained
|
||||
from a PortfolioEntry's portfolio_id attribute.
|
||||
ticker_id: string (optional; see Notes) This may be obtained from
|
||||
a PositionEntry's ticker_id attribute. Alternatively it can
|
||||
be constructed using the security's exchange and symbol,
|
||||
e.g. 'NASDAQ:GOOG'
|
||||
transaction_feed: TransactionFeed (optional; see Notes)
|
||||
|
||||
Notes:
|
||||
Either a PositionEntry OR (a portfolio ID AND ticker ID) OR
|
||||
a TransactionFeed must be provided.
|
||||
"""
|
||||
if transaction_feed:
|
||||
feed = transaction_feed
|
||||
else:
|
||||
if position_entry:
|
||||
feed = self.GetTransactionFeed(position_entry=position_entry)
|
||||
elif portfolio_id and ticker_id:
|
||||
feed = self.GetTransactionFeed(
|
||||
portfolio_id=portfolio_id, ticker_id=ticker_id)
|
||||
for txn in feed.entry:
|
||||
self.DeleteTransaction(txn)
|
||||
return True
|
||||
|
||||
def GetTransaction(self, portfolio_id=None, ticker_id=None,
|
||||
transaction_id=None):
|
||||
uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions/%s' \
|
||||
% (portfolio_id, ticker_id, transaction_id)
|
||||
return self.Get(uri, converter=gdata.finance.TransactionEntryFromString)
|
||||
|
||||
def AddTransaction(self, transaction_entry=None, transaction_feed=None,
|
||||
position_entry=None, portfolio_id=None, ticker_id=None):
|
||||
"""
|
||||
Args:
|
||||
transaction_entry: TransactionEntry (required)
|
||||
transaction_feed: TransactionFeed (optional; see Notes)
|
||||
position_entry: PositionEntry (optional; see Notes)
|
||||
portfolio_id: string (optional; see Notes) This may be obtained
|
||||
from a PortfolioEntry's portfolio_id attribute.
|
||||
ticker_id: string (optional; see Notes) This may be obtained from
|
||||
a PositionEntry's ticker_id attribute. Alternatively it can
|
||||
be constructed using the security's exchange and symbol,
|
||||
e.g. 'NASDAQ:GOOG'
|
||||
|
||||
Notes:
|
||||
Either a TransactionFeed OR a PositionEntry OR (a portfolio ID AND
|
||||
ticker ID) must be provided.
|
||||
"""
|
||||
if transaction_feed:
|
||||
uri = transaction_feed.GetPostLink().href
|
||||
elif position_entry:
|
||||
uri = position_entry.GetSelfLink().href + '/transactions'
|
||||
elif portfolio_id and ticker_id:
|
||||
uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \
|
||||
% (portfolio_id, ticker_id)
|
||||
return self.Post(transaction_entry, uri,
|
||||
converter=gdata.finance.TransactionEntryFromString)
|
||||
|
||||
def UpdateTransaction(self, transaction_entry=None):
|
||||
uri = transaction_entry.GetEditLink().href
|
||||
return self.Put(transaction_entry, uri,
|
||||
converter=gdata.finance.TransactionEntryFromString)
|
||||
|
||||
def DeleteTransaction(self, transaction_entry=None):
|
||||
uri = transaction_entry.GetEditLink().href
|
||||
return self.Delete(uri)
|
||||
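An end-to-end usage sketch for FinanceService (credentials and ids are placeholders; the transaction_data attribute mirrors the TransactionEntry class defined earlier in gdata/finance/__init__.py):

import gdata.finance.service

client = gdata.finance.service.FinanceService()
client.ClientLogin('user@example.com', 'password', source='example-finance-app')

# List portfolios together with their inlined positions.
feed = client.GetPortfolioFeed(query={'positions': 'true'})
for portfolio in feed.entry:
  print portfolio.portfolio_title, portfolio.portfolio_id
  for position in portfolio.positions or []:
    print '  ', position.ticker_id

# Fetch the transactions of one position directly by its ids.
txns = client.GetTransactionFeed(portfolio_id='1', ticker_id='NASDAQ:GOOG')
for txn in txns.entry:
  print txn.transaction_data.type, txn.transaction_data.shares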
1306
python/gdata/gauth.py
Normal file
File diff suppressed because it is too large
185
python/gdata/geo/__init__.py
Normal file
@@ -0,0 +1,185 @@
|
||||
# -*-*- encoding: utf-8 -*-*-
|
||||
#
|
||||
# This is gdata.photos.geo, implementing geographical positioning in gdata structures
|
||||
#
|
||||
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
|
||||
#
|
||||
# Copyright 2007 Håvard Gulldahl
|
||||
# Portions copyright 2007 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Picasa Web Albums uses the georss and gml namespaces for
|
||||
elements defined in the GeoRSS and Geography Markup Language specifications.
|
||||
|
||||
Specifically, Picasa Web Albums uses the following elements:
|
||||
|
||||
georss:where
|
||||
gml:Point
|
||||
gml:pos
|
||||
|
||||
http://code.google.com/apis/picasaweb/reference.html#georss_reference
|
||||
|
||||
|
||||
Picasa Web Albums also accepts geographic-location data in two other formats:
|
||||
W3C format and plain-GeoRSS (without GML) format.
|
||||
"""
|
||||
#
|
||||
#Over the wire, the Picasa Web Albums only accepts and sends the
|
||||
#elements mentioned above, but this module will let you seamlessly convert
|
||||
#between the different formats (TODO 2007-10-18 hg)
|
||||
|
||||
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__
|
||||
__license__ = 'Apache License v2'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
|
||||
GML_NAMESPACE = 'http://www.opengis.net/gml'
|
||||
GEORSS_NAMESPACE = 'http://www.georss.org/georss'
|
||||
|
||||
class GeoBaseElement(atom.AtomBase):
|
||||
"""Base class for elements.
|
||||
|
||||
To add new elements, you only need to add the element tag name to self._tag
|
||||
and the namespace to self._namespace
|
||||
"""
|
||||
|
||||
_tag = ''
|
||||
_namespace = GML_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
def __init__(self, name=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.name = name
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
class Pos(GeoBaseElement):
|
||||
"""(string) Specifies a latitude and longitude, separated by a space,
|
||||
e.g. `35.669998 139.770004'"""
|
||||
|
||||
_tag = 'pos'
|
||||
def PosFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Pos, xml_string)
|
||||
|
||||
class Point(GeoBaseElement):
|
||||
"""(container) Specifies a particular geographical point, by means of
|
||||
a <gml:pos> element."""
|
||||
|
||||
_tag = 'Point'
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}pos' % GML_NAMESPACE] = ('pos', Pos)
|
||||
def __init__(self, pos=None, extension_elements=None, extension_attributes=None, text=None):
|
||||
GeoBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
if pos is None:
|
||||
pos = Pos()
|
||||
self.pos=pos
|
||||
def PointFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Point, xml_string)
|
||||
|
||||
class Where(GeoBaseElement):
|
||||
"""(container) Specifies a geographical location or region.
|
||||
A container element, containing a single <gml:Point> element.
|
||||
(Not to be confused with <gd:where>.)
|
||||
|
||||
Note that the (only) child attribute, .Point, is title-cased.
|
||||
This reflects the names of elements in the xml stream
|
||||
(principle of least surprise).
|
||||
|
||||
As a convenience, you can get a tuple of (lat, lon) with Where.location(),
|
||||
and set the same data with Where.setLocation( (lat, lon) ).
|
||||
|
||||
Similarly, there are methods to set and get only latitude and longitude.
|
||||
"""
|
||||
|
||||
_tag = 'where'
|
||||
_namespace = GEORSS_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}Point' % GML_NAMESPACE] = ('Point', Point)
|
||||
def __init__(self, point=None, extension_elements=None, extension_attributes=None, text=None):
|
||||
GeoBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
if point is None:
|
||||
point = Point()
|
||||
self.Point=point
|
||||
def location(self):
|
||||
"(float, float) Return Where.Point.pos.text as a (lat,lon) tuple"
|
||||
try:
|
||||
return tuple([float(z) for z in self.Point.pos.text.split(' ')])
|
||||
except AttributeError:
|
||||
return tuple()
|
||||
def set_location(self, latlon):
|
||||
"""(bool) Set Where.Point.pos.text from a (lat,lon) tuple.
|
||||
|
||||
Arguments:
|
||||
lat (float): The latitude in degrees, from -90.0 to 90.0
|
||||
lon (float): The longitude in degrees, from -180.0 to 180.0
|
||||
|
||||
Returns True on success.
|
||||
|
||||
"""
|
||||
|
||||
assert(isinstance(latlon[0], float))
|
||||
assert(isinstance(latlon[1], float))
|
||||
try:
|
||||
self.Point.pos.text = "%s %s" % (latlon[0], latlon[1])
|
||||
return True
|
||||
except AttributeError:
|
||||
return False
|
||||
def latitude(self):
|
||||
"(float) Get the latitude value of the geo-tag. See also .location()"
|
||||
lat, lon = self.location()
|
||||
return lat
|
||||
|
||||
def longitude(self):
|
||||
"(float) Get the longtitude value of the geo-tag. See also .location()"
|
||||
lat, lon = self.location()
|
||||
return lon
|
||||
|
||||
longtitude = longitude
|
||||
|
||||
def set_latitude(self, lat):
|
||||
"""(bool) Set the latitude value of the geo-tag.
|
||||
|
||||
Args:
|
||||
lat (float): The new latitude value
|
||||
|
||||
See also .set_location()
|
||||
"""
|
||||
_lat, lon = self.location()
|
||||
return self.set_location((lat, lon))
|
||||
|
||||
def set_longitude(self, lon):
|
||||
"""(bool) Set the longtitude value of the geo-tag.
|
||||
|
||||
Args:
|
||||
lon (float): The new longitude value
|
||||
|
||||
See also .set_location()
|
||||
"""
|
||||
lat, _lon = self.location()
|
||||
return self.set_location((lat, lon))
|
||||
|
||||
set_longtitude = set_longitude
|
||||
|
||||
def WhereFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Where, xml_string)
|
||||
|
||||
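A small usage sketch for the Where element (coordinates are illustrative):

import gdata.geo

where = gdata.geo.Where()
where.set_location((35.669998, 139.770004))
print where.location()                  # -> (35.669998, 139.770004)
print where.latitude(), where.longitude()
print where.ToString()                  # <georss:where><gml:Point><gml:pos>...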
92
python/gdata/geo/data.py
Normal file
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains the data classes of the Geography Extension"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
|
||||
|
||||
GEORSS_TEMPLATE = '{http://www.georss.org/georss/}%s'
|
||||
GML_TEMPLATE = '{http://www.opengis.net/gml/}%s'
|
||||
GEO_TEMPLATE = '{http://www.w3.org/2003/01/geo/wgs84_pos#/}%s'
|
||||
|
||||
|
||||
class GeoLat(atom.core.XmlElement):
|
||||
"""Describes a W3C latitude."""
|
||||
_qname = GEO_TEMPLATE % 'lat'
|
||||
|
||||
|
||||
class GeoLong(atom.core.XmlElement):
|
||||
"""Describes a W3C longitude."""
|
||||
_qname = GEO_TEMPLATE % 'long'
|
||||
|
||||
|
||||
class GeoRssBox(atom.core.XmlElement):
|
||||
"""Describes a geographical region."""
|
||||
_qname = GEORSS_TEMPLATE % 'box'
|
||||
|
||||
|
||||
class GeoRssPoint(atom.core.XmlElement):
|
||||
"""Describes a geographical location."""
|
||||
_qname = GEORSS_TEMPLATE % 'point'
|
||||
|
||||
|
||||
class GmlLowerCorner(atom.core.XmlElement):
|
||||
"""Describes a lower corner of a region."""
|
||||
_qname = GML_TEMPLATE % 'lowerCorner'
|
||||
|
||||
|
||||
class GmlPos(atom.core.XmlElement):
|
||||
"""Describes a latitude and longitude."""
|
||||
_qname = GML_TEMPLATE % 'pos'
|
||||
|
||||
|
||||
class GmlPoint(atom.core.XmlElement):
|
||||
"""Describes a particular geographical point."""
|
||||
_qname = GML_TEMPLATE % 'Point'
|
||||
pos = GmlPos
|
||||
|
||||
|
||||
class GmlUpperCorner(atom.core.XmlElement):
|
||||
"""Describes an upper corner of a region."""
|
||||
_qname = GML_TEMPLATE % 'upperCorner'
|
||||
|
||||
|
||||
class GmlEnvelope(atom.core.XmlElement):
|
||||
"""Describes a Gml geographical region."""
|
||||
_qname = GML_TEMPLATE % 'Envelope'
|
||||
lower_corner = GmlLowerCorner
|
||||
upper_corner = GmlUpperCorner
|
||||
|
||||
|
||||
class GeoRssWhere(atom.core.XmlElement):
|
||||
"""Describes a geographical location or region."""
|
||||
_qname = GEORSS_TEMPLATE % 'where'
|
||||
Point = GmlPoint
|
||||
Envelope = GmlEnvelope
|
||||
|
||||
|
||||
class W3CPoint(atom.core.XmlElement):
|
||||
"""Describes a W3C geographical location."""
|
||||
_qname = GEO_TEMPLATE % 'Point'
|
||||
long = GeoLong
|
||||
lat = GeoLat
|
||||
|
||||
|
||||
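A sketch of building the equivalent georss:where element with the v2 classes above (assumes the to_string serializer from atom.core.XmlElement, as used by the other v2 modules):

import gdata.geo.data

where = gdata.geo.data.GeoRssWhere(
    Point=gdata.geo.data.GmlPoint(
        pos=gdata.geo.data.GmlPos(text='35.669998 139.770004')))
print where.to_string()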
229
python/gdata/health/__init__.py
Normal file
@@ -0,0 +1,229 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains extensions to Atom objects used with Google Health."""
|
||||
|
||||
__author__ = 'api.eric@google.com (Eric Bidelman)'
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
|
||||
CCR_NAMESPACE = 'urn:astm-org:CCR'
|
||||
METADATA_NAMESPACE = 'http://schemas.google.com/health/metadata'
|
||||
|
||||
|
||||
class Ccr(atom.AtomBase):
|
||||
"""Represents a Google Health <ContinuityOfCareRecord>."""
|
||||
|
||||
_tag = 'ContinuityOfCareRecord'
|
||||
_namespace = CCR_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
|
||||
def __init__(self, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
atom.AtomBase.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
def GetAlerts(self):
|
||||
"""Helper for extracting Alert/Allergy data from the CCR.
|
||||
|
||||
Returns:
|
||||
A list of ExtensionElements (one for each allergy found) or None if
|
||||
no allergies were found in this CCR.
|
||||
"""
|
||||
try:
|
||||
body = self.FindExtensions('Body')[0]
|
||||
return body.FindChildren('Alerts')[0].FindChildren('Alert')
|
||||
except (IndexError, AttributeError):
|
||||
return None
|
||||
|
||||
def GetAllergies(self):
|
||||
"""Alias for GetAlerts()."""
|
||||
return self.GetAlerts()
|
||||
|
||||
def GetProblems(self):
|
||||
"""Helper for extracting Problem/Condition data from the CCR.
|
||||
|
||||
Returns:
|
||||
A list of ExtensionElements (one for each problem found) or None if
|
||||
no problems were found in this CCR.
|
||||
"""
|
||||
try:
|
||||
body = self.FindExtensions('Body')[0]
|
||||
return body.FindChildren('Problems')[0].FindChildren('Problem')
|
||||
except (IndexError, AttributeError):
|
||||
return None
|
||||
|
||||
def GetConditions(self):
|
||||
"""Alias for GetProblems()."""
|
||||
return self.GetProblems()
|
||||
|
||||
def GetProcedures(self):
|
||||
"""Helper for extracting Procedure data from the CCR.
|
||||
|
||||
Returns:
|
||||
A list of ExtensionElements (one for each procedure found) or None if
|
||||
no procedures were found in this CCR.
|
||||
"""
|
||||
try:
|
||||
body = self.FindExtensions('Body')[0]
|
||||
return body.FindChildren('Procedures')[0].FindChildren('Procedure')
|
||||
except (IndexError, AttributeError):
|
||||
return None
|
||||
|
||||
def GetImmunizations(self):
|
||||
"""Helper for extracting Immunization data from the CCR.
|
||||
|
||||
Returns:
|
||||
A list of ExtensionElements (one for each immunization found) or None if
|
||||
no immunizations were found in this CCR.
|
||||
"""
|
||||
try:
|
||||
body = self.FindExtensions('Body')[0]
|
||||
return body.FindChildren('Immunizations')[0].FindChildren('Immunization')
|
||||
except (IndexError, AttributeError):
|
||||
return None
|
||||
|
||||
def GetMedications(self):
|
||||
"""Helper for extracting Medication data from the CCR.
|
||||
|
||||
Returns:
|
||||
A list of ExtensionElements (one for each medication found) or None if
|
||||
no medications were found in this CCR.
|
||||
"""
|
||||
try:
|
||||
body = self.FindExtensions('Body')[0]
|
||||
return body.FindChildren('Medications')[0].FindChildren('Medication')
|
||||
except (IndexError, AttributeError):
|
||||
return None
|
||||
|
||||
def GetResults(self):
|
||||
"""Helper for extracting Results/Labresults data from the CCR.
|
||||
|
||||
Returns:
|
||||
A list of ExtensionElements (one for each result found) or None if
|
||||
no results were found in this CCR.
|
||||
"""
|
||||
try:
|
||||
body = self.FindExtensions('Body')[0]
|
||||
return body.FindChildren('Results')[0].FindChildren('Result')
|
||||
except (IndexError, AttributeError):
|
||||
return None
|
||||
|
||||
|
||||
class ProfileEntry(gdata.GDataEntry):
|
||||
"""The Google Health version of an Atom Entry."""
|
||||
|
||||
_tag = gdata.GDataEntry._tag
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
_children['{%s}ContinuityOfCareRecord' % CCR_NAMESPACE] = ('ccr', Ccr)
|
||||
|
||||
def __init__(self, ccr=None, author=None, category=None, content=None,
|
||||
atom_id=None, link=None, published=None, title=None,
|
||||
updated=None, text=None, extension_elements=None,
|
||||
extension_attributes=None):
|
||||
self.ccr = ccr
|
||||
gdata.GDataEntry.__init__(
|
||||
self, author=author, category=category, content=content,
|
||||
atom_id=atom_id, link=link, published=published, title=title,
|
||||
updated=updated, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes, text=text)
|
||||
|
||||
|
||||
class ProfileFeed(gdata.GDataFeed):
|
||||
"""A feed containing a list of Google Health profile entries."""
|
||||
|
||||
_tag = gdata.GDataFeed._tag
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry])
|
||||
|
||||
|
||||
class ProfileListEntry(gdata.GDataEntry):
|
||||
"""The Atom Entry in the Google Health profile list feed."""
|
||||
|
||||
_tag = gdata.GDataEntry._tag
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataEntry._children.copy()
|
||||
_attributes = gdata.GDataEntry._attributes.copy()
|
||||
|
||||
def GetProfileId(self):
|
||||
return self.content.text
|
||||
|
||||
def GetProfileName(self):
|
||||
return self.title.text
|
||||
|
||||
|
||||
class ProfileListFeed(gdata.GDataFeed):
|
||||
"""A feed containing a list of Google Health profile list entries."""
|
||||
|
||||
_tag = gdata.GDataFeed._tag
|
||||
_namespace = atom.ATOM_NAMESPACE
|
||||
_children = gdata.GDataFeed._children.copy()
|
||||
_attributes = gdata.GDataFeed._attributes.copy()
|
||||
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileListEntry])
|
||||
|
||||
|
||||
def ProfileEntryFromString(xml_string):
|
||||
"""Converts an XML string into a ProfileEntry object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Health profile feed entry.
|
||||
|
||||
Returns:
|
||||
A ProfileEntry object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(ProfileEntry, xml_string)
|
||||
|
||||
|
||||
def ProfileListEntryFromString(xml_string):
|
||||
"""Converts an XML string into a ProfileListEntry object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a Health profile list feed entry.
|
||||
|
||||
Returns:
|
||||
A ProfileListEntry object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(ProfileListEntry, xml_string)
|
||||
|
||||
|
||||
def ProfileFeedFromString(xml_string):
|
||||
"""Converts an XML string into a ProfileFeed object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a ProfileFeed feed.
|
||||
|
||||
Returns:
|
||||
A ProfileFeed object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(ProfileFeed, xml_string)
|
||||
|
||||
|
||||
def ProfileListFeedFromString(xml_string):
|
||||
"""Converts an XML string into a ProfileListFeed object.
|
||||
|
||||
Args:
|
||||
xml_string: string The XML describing a ProfileListFeed feed.
|
||||
|
||||
Returns:
|
||||
A ProfileListFeed object corresponding to the given XML.
|
||||
"""
|
||||
return atom.CreateClassFromXMLString(ProfileListFeed, xml_string)
|
||||
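A usage sketch for the profile classes above; the XML string is a placeholder for a response obtained via HealthService (see service.py below):

import gdata.health

xml_string = '...'  # placeholder: Atom XML for a Health profile feed

feed = gdata.health.ProfileFeedFromString(xml_string)
for entry in feed.entry:
  if entry.ccr is None:
    continue
  for med in entry.ccr.GetMedications() or []:
    print med.ToString()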
263
python/gdata/health/service.py
Normal file
@@ -0,0 +1,263 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2009 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""HealthService extends GDataService to streamline Google Health API access.
|
||||
|
||||
HealthService: Provides methods to interact with the profile, profile list,
|
||||
and register/notices feeds. Extends GDataService.
|
||||
|
||||
HealthProfileQuery: Queries the Google Health Profile feed.
|
||||
|
||||
HealthProfileListQuery: Queries the Google Health Profile list feed.
|
||||
"""
|
||||
|
||||
__author__ = 'api.eric@google.com (Eric Bidelman)'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata.health
|
||||
import gdata.service
|
||||
|
||||
|
||||
class HealthService(gdata.service.GDataService):
|
||||
|
||||
"""Client extension for the Google Health service Document List feed."""
|
||||
|
||||
def __init__(self, email=None, password=None, source=None,
|
||||
use_h9_sandbox=False, server='www.google.com',
|
||||
additional_headers=None, **kwargs):
|
||||
"""Creates a client for the Google Health service.
|
||||
|
||||
Args:
|
||||
email: string (optional) The user's email address, used for
|
||||
authentication.
|
||||
password: string (optional) The user's password.
|
||||
source: string (optional) The name of the user's application.
|
||||
use_h9_sandbox: boolean (optional) True to issue requests against the
|
||||
/h9 developer's sandbox.
|
||||
server: string (optional) The name of the server to which a connection
|
||||
will be opened.
|
||||
additional_headers: dictionary (optional) Any additional headers which
|
||||
should be included with CRUD operations.
|
||||
kwargs: The other parameters to pass to gdata.service.GDataService
|
||||
constructor.
|
||||
"""
|
||||
service = use_h9_sandbox and 'weaver' or 'health'
|
||||
gdata.service.GDataService.__init__(
|
||||
self, email=email, password=password, service=service, source=source,
|
||||
server=server, additional_headers=additional_headers, **kwargs)
|
||||
self.ssl = True
|
||||
self.use_h9_sandbox = use_h9_sandbox
|
||||
|
||||
def __get_service(self):
|
||||
return self.use_h9_sandbox and 'h9' or 'health'
|
||||
|
||||
def GetProfileFeed(self, query=None, profile_id=None):
|
||||
"""Fetches the users Google Health profile feed.
|
||||
|
||||
Args:
|
||||
query: HealthProfileQuery or string (optional) A query to use on the
|
||||
profile feed. If None, a HealthProfileQuery is constructed.
|
||||
profile_id: string (optional) The profile id to query the profile feed
|
||||
with when using ClientLogin. Note: this parameter is ignored if
|
||||
query is set.
|
||||
|
||||
Returns:
|
||||
A gdata.health.ProfileFeed object containing the user's Health profile.
|
||||
"""
|
||||
if query is None:
|
||||
projection = profile_id and 'ui' or 'default'
|
||||
uri = HealthProfileQuery(
|
||||
service=self.__get_service(), projection=projection,
|
||||
profile_id=profile_id).ToUri()
|
||||
elif isinstance(query, HealthProfileQuery):
|
||||
uri = query.ToUri()
|
||||
else:
|
||||
uri = query
|
||||
|
||||
return self.GetFeed(uri, converter=gdata.health.ProfileFeedFromString)
|
||||
|
||||
def GetProfileListFeed(self, query=None):
|
||||
"""Fetches the users Google Health profile feed.
|
||||
|
||||
Args:
|
||||
query: HealthProfileListQuery or string (optional) A query to use
|
||||
on the profile list feed. If None, a HealthProfileListQuery is
|
||||
constructed to /health/feeds/profile/list or /h9/feeds/profile/list.
|
||||
|
||||
Returns:
|
||||
A gdata.health.ProfileListFeed object containing the user's list
|
||||
of profiles.
|
||||
"""
|
||||
if not query:
|
||||
uri = HealthProfileListQuery(service=self.__get_service()).ToUri()
|
||||
elif isinstance(query, HealthProfileListQuery):
|
||||
uri = query.ToUri()
|
||||
else:
|
||||
uri = query
|
||||
|
||||
return self.GetFeed(uri, converter=gdata.health.ProfileListFeedFromString)
|
||||
|
||||
def SendNotice(self, subject, body=None, content_type='html',
|
||||
ccr=None, profile_id=None):
|
||||
"""Sends (posts) a notice to the user's Google Health profile.
|
||||
|
||||
Args:
|
||||
subject: A string representing the message's subject line.
|
||||
body: string (optional) The message body.
|
||||
content_type: string (optional) The content type of the notice message
|
||||
body. This parameter is only honored when a message body is
|
||||
specified.
|
||||
ccr: string (optional) The CCR XML document to reconcile into the
|
||||
user's profile.
|
||||
profile_id: string (optional) The profile id to work with when using
|
||||
ClientLogin. Note: this parameter is ignored if query is set.
|
||||
|
||||
Returns:
|
||||
A gdata.health.ProfileEntry object of the posted entry.
|
||||
"""
|
||||
if body:
|
||||
content = atom.Content(content_type=content_type, text=body)
|
||||
else:
|
||||
content = body
|
||||
|
||||
entry = gdata.GDataEntry(
|
||||
title=atom.Title(text=subject), content=content,
|
||||
extension_elements=[atom.ExtensionElementFromString(ccr)])
|
||||
|
||||
projection = profile_id and 'ui' or 'default'
|
||||
query = HealthRegisterQuery(service=self.__get_service(),
|
||||
projection=projection, profile_id=profile_id)
|
||||
return self.Post(entry, query.ToUri(),
|
||||
converter=gdata.health.ProfileEntryFromString)
|
||||
|
||||
|
||||
class HealthProfileQuery(gdata.service.Query):
|
||||
|
||||
"""Object used to construct a URI to query the Google Health profile feed."""
|
||||
|
||||
def __init__(self, service='health', feed='feeds/profile',
|
||||
projection='default', profile_id=None, text_query=None,
|
||||
params=None, categories=None):
|
||||
"""Constructor for Health profile feed query.
|
||||
|
||||
Args:
|
||||
service: string (optional) The service to query. Either 'health' or 'h9'.
|
||||
feed: string (optional) The path for the feed. The default value is
|
||||
'feeds/profile'.
|
||||
projection: string (optional) The visibility of the data. Possible values
|
||||
are 'default' for AuthSub and 'ui' for ClientLogin. If this value
|
||||
is set to 'ui', the profile_id parameter should also be set.
|
||||
profile_id: string (optional) The profile id to query. This should only
|
||||
be used when using ClientLogin.
|
||||
text_query: str (optional) The contents of the q query parameter. The
|
||||
contents of the text_query are URL escaped upon conversion to a URI.
|
||||
Note: this parameter can only be used on the register feed using
|
||||
ClientLogin.
|
||||
params: dict (optional) Parameter value string pairs which become URL
|
||||
params when translated to a URI. These parameters are added to
|
||||
the query's items.
|
||||
categories: list (optional) List of category strings which should be
|
||||
included as query categories. See gdata.service.Query for
|
||||
additional documentation.
|
||||
"""
|
||||
self.service = service
|
||||
self.profile_id = profile_id
|
||||
self.projection = projection
|
||||
gdata.service.Query.__init__(self, feed=feed, text_query=text_query,
|
||||
params=params, categories=categories)
|
||||
|
||||
def ToUri(self):
|
||||
"""Generates a URI from the query parameters set in the object.
|
||||
|
||||
Returns:
|
||||
A string containing the URI used to retrieve entries from the Health
|
||||
profile feed.
|
||||
"""
|
||||
old_feed = self.feed
|
||||
self.feed = '/'.join([self.service, old_feed, self.projection])
|
||||
|
||||
if self.profile_id:
|
||||
self.feed += '/' + self.profile_id
|
||||
self.feed = '/%s' % (self.feed,)
|
||||
|
||||
new_feed = gdata.service.Query.ToUri(self)
|
||||
self.feed = old_feed
|
||||
return new_feed
|
||||
|
||||
|
||||
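For illustration, a ClientLogin-style query of this kind yields a URI of the form below (the profile id is a placeholder):

query = HealthProfileQuery(service='health', projection='ui',
                           profile_id='profile-xyz')
print query.ToUri()  # -> /health/feeds/profile/ui/profile-xyz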
class HealthProfileListQuery(gdata.service.Query):
|
||||
|
||||
"""Object used to construct a URI to query a Health profile list feed."""
|
||||
|
||||
def __init__(self, service='health', feed='feeds/profile/list'):
|
||||
"""Constructor for Health profile list feed query.
|
||||
|
||||
Args:
|
||||
service: string (optional) The service to query. Either 'health' or 'h9'.
|
||||
feed: string (optional) The path for the feed. The default value is
|
||||
'feeds/profile/list'.
|
||||
"""
|
||||
gdata.service.Query.__init__(self, feed)
|
||||
self.service = service
|
||||
|
||||
def ToUri(self):
|
||||
"""Generates a URI from the query parameters set in the object.
|
||||
|
||||
Returns:
|
||||
A string containing the URI used to retrieve entries from the
|
||||
profile list feed.
|
||||
"""
|
||||
return '/%s' % ('/'.join([self.service, self.feed]),)
|
||||
|
||||
|
||||
class HealthRegisterQuery(gdata.service.Query):
|
||||
|
||||
"""Object used to construct a URI to query a Health register/notice feed."""
|
||||
|
||||
def __init__(self, service='health', feed='feeds/register',
|
||||
projection='default', profile_id=None):
|
||||
"""Constructor for Health profile list feed query.
|
||||
|
||||
Args:
|
||||
service: string (optional) The service to query. Either 'health' or 'h9'.
|
||||
feed: string (optional) The path for the feed. The default value is
|
||||
'feeds/register'.
|
||||
projection: string (optional) The visibility of the data. Possible values
|
||||
are 'default' for AuthSub and 'ui' for ClientLogin. If this value
|
||||
is set to 'ui', the profile_id parameter should also be set.
|
||||
profile_id: string (optional) The profile id to query. This should only
|
||||
be used when using ClientLogin.
|
||||
"""
|
||||
gdata.service.Query.__init__(self, feed)
|
||||
self.service = service
|
||||
self.projection = projection
|
||||
self.profile_id = profile_id
|
||||
|
||||
def ToUri(self):
|
||||
"""Generates a URI from the query parameters set in the object.
|
||||
|
||||
Returns:
|
||||
A string containing the URI needed to interact with the register feed.
|
||||
"""
|
||||
old_feed = self.feed
|
||||
self.feed = '/'.join([self.service, old_feed, self.projection])
|
||||
new_feed = gdata.service.Query.ToUri(self)
|
||||
self.feed = old_feed
|
||||
|
||||
if self.profile_id:
|
||||
new_feed += '/' + self.profile_id
|
||||
return '/%s' % (new_feed,)
|
||||
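An end-to-end usage sketch for HealthService (credentials are placeholders):

import gdata.health.service

client = gdata.health.service.HealthService()
client.ClientLogin('user@example.com', 'password', source='example-health-app')

# List the user's profiles, then fetch the first one by its id.
profiles = client.GetProfileListFeed()
for p in profiles.entry:
  print p.GetProfileName(), p.GetProfileId()

profile = client.GetProfileFeed(profile_id=profiles.entry[0].GetProfileId())
for entry in profile.entry:
  if entry.ccr is not None:
    print entry.ccr.GetMedications()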
0
python/gdata/maps/__init__.py
Normal file
179
python/gdata/maps/client.py
Normal file
@@ -0,0 +1,179 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Contains a client to communicate with the Maps Data servers.
|
||||
|
||||
For documentation on the Maps Data API, see:
|
||||
http://code.google.com/apis/maps/documentation/mapsdata/
|
||||
"""
|
||||
|
||||
|
||||
__author__ = 'api.roman.public@google.com (Roman Nurik)'
|
||||
|
||||
|
||||
import gdata.client
|
||||
import gdata.maps.data
|
||||
import atom.data
|
||||
import atom.http_core
|
||||
import gdata.gauth
|
||||
|
||||
|
||||
# List user's maps, takes a user ID, or 'default'.
|
||||
MAP_URL_TEMPLATE = 'http://maps.google.com/maps/feeds/maps/%s/full'
|
||||
|
||||
# List map's features, takes a user ID (or 'default') and map ID.
|
||||
MAP_FEATURE_URL_TEMPLATE = ('http://maps.google.com/maps'
|
||||
'/feeds/features/%s/%s/full')
|
||||
|
||||
# The KML mime type
|
||||
KML_CONTENT_TYPE = 'application/vnd.google-earth.kml+xml'
|
||||
|
||||
|
||||
class MapsClient(gdata.client.GDClient):
|
||||
"""Maps Data API GData client."""
|
||||
|
||||
api_version = '2'
|
||||
auth_service = 'local'
|
||||
auth_scopes = gdata.gauth.AUTH_SCOPES['local']
|
||||
|
||||
def get_maps(self, user_id='default', auth_token=None,
|
||||
desired_class=gdata.maps.data.MapFeed, **kwargs):
|
||||
"""Retrieves a Map feed for the given user ID.
|
||||
|
||||
Args:
|
||||
user_id: An optional string representing the user ID; should be 'default'.
|
||||
|
||||
Returns:
|
||||
A gdata.maps.data.MapFeed.
|
||||
"""
|
||||
return self.get_feed(MAP_URL_TEMPLATE % user_id, auth_token=auth_token,
|
||||
desired_class=desired_class, **kwargs)
|
||||
|
||||
GetMaps = get_maps
|
||||
|
||||
def get_features(self, map_id, user_id='default', auth_token=None,
|
||||
desired_class=gdata.maps.data.FeatureFeed, query=None,
|
||||
**kwargs):
|
||||
"""Retrieves a Feature feed for the given map ID/user ID combination.
|
||||
|
||||
Args:
|
||||
map_id: A string representing the ID of the map whose features should be
|
||||
retrieved.
|
||||
user_id: An optional string representing the user ID; should be 'default'.
|
||||
|
||||
Returns:
|
||||
A gdata.maps.data.FeatureFeed.
|
||||
"""
|
||||
return self.get_feed(MAP_FEATURE_URL_TEMPLATE % (user_id, map_id),
|
||||
auth_token=auth_token, desired_class=desired_class,
|
||||
query=query, **kwargs)
|
||||
|
||||
GetFeatures = get_features
|
||||
|
||||
def create_map(self, title, summary=None, unlisted=False,
|
||||
auth_token=None, title_type='text', summary_type='text',
|
||||
**kwargs):
|
||||
"""Creates a new map and posts it to the Maps Data servers.
|
||||
|
||||
Args:
|
||||
title: A string representing the title of the new map.
|
||||
summary: An optional string representing the new map's description.
|
||||
unlisted: An optional boolean identifying whether the map should be
|
||||
unlisted (True) or public (False). Default False.
|
||||
|
||||
Returns:
|
||||
A gdata.maps.data.Map.
|
||||
"""
|
||||
new_entry = gdata.maps.data.Map(
|
||||
title=atom.data.Title(text=title, type=title_type))
|
||||
if summary:
|
||||
new_entry.summary = atom.data.Summary(text=summary, type=summary_type)
|
||||
if unlisted:
|
||||
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
|
||||
return self.post(new_entry, MAP_URL_TEMPLATE % 'default',
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
CreateMap = create_map
|
||||
|
||||
def add_feature(self, map_id, title, content,
|
||||
auth_token=None, title_type='text',
|
||||
content_type=KML_CONTENT_TYPE, **kwargs):
|
||||
"""Adds a new feature to the given map.
|
||||
|
||||
Args:
|
||||
map_id: A string representing the ID of the map to which the new feature
|
||||
should be added.
|
||||
title: A string representing the name/title of the new feature.
|
||||
content: A KML string or gdata.maps.data.KmlContent object representing
|
||||
the new feature's KML contents, including its description.
|
||||
|
||||
Returns:
|
||||
A gdata.maps.data.Feature.
|
||||
"""
|
||||
if content_type == KML_CONTENT_TYPE:
|
||||
if not isinstance(content, gdata.maps.data.KmlContent):
|
||||
content = gdata.maps.data.KmlContent(kml=content)
|
||||
else:
|
||||
content = atom.data.Content(content=content, type=content_type)
|
||||
new_entry = gdata.maps.data.Feature(
|
||||
title=atom.data.Title(text=title, type=title_type),
|
||||
content=content)
|
||||
return self.post(new_entry, MAP_FEATURE_URL_TEMPLATE % ('default', map_id),
|
||||
auth_token=auth_token, **kwargs)
|
||||
|
||||
AddFeature = add_feature
|
||||
|
||||
def update(self, entry, auth_token=None, **kwargs):
|
||||
"""Sends changes to a given map or feature entry to the Maps Data servers.
|
||||
|
||||
Args:
|
||||
entry: A gdata.maps.data.Map or gdata.maps.data.Feature to be updated
|
||||
server-side.
|
||||
"""
|
||||
# The Maps Data API does not currently support ETags, so for now remove
|
||||
# the ETag before performing an update.
|
||||
old_etag = entry.etag
|
||||
entry.etag = None
|
||||
response = gdata.client.GDClient.update(self, entry,
|
||||
auth_token=auth_token, **kwargs)
|
||||
entry.etag = old_etag
|
||||
return response
|
||||
|
||||
Update = update
|
||||
|
||||
  def delete(self, entry_or_uri, auth_token=None, **kwargs):
    """Deletes the given entry or entry URI server-side.

    Args:
      entry_or_uri: A gdata.maps.data.Map, gdata.maps.data.Feature, or URI
          string representing the entry to delete.
    """
    if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)):
      return gdata.client.GDClient.delete(self, entry_or_uri,
                                          auth_token=auth_token, **kwargs)
    # The Maps Data API does not currently support ETags, so for now remove
    # the ETag before performing a delete.
    old_etag = entry_or_uri.etag
    entry_or_uri.etag = None
    response = gdata.client.GDClient.delete(self, entry_or_uri,
                                            auth_token=auth_token, **kwargs)
    # TODO: if GDClient.delete raises an exception, the entry's etag may be
    # left as None. Should revisit this logic.
    entry_or_uri.etag = old_etag
    return response

  Delete = delete
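As a quick orientation for the methods above, here is a minimal usage sketch. It assumes the enclosing client class is gdata.maps.client.MapsClient and that authentication happens via ClientLogin; both of those details are assumptions, not shown in this part of the diff.

import gdata.maps.client

# Hypothetical client class name and auth flow.
client = gdata.maps.client.MapsClient()
client.client_login('user@example.com', 'password', source='example-app')

# Create an unlisted map, then attach a single KML placemark to it.
new_map = client.create_map('Lunch spots', summary='Places to eat', unlisted=True)
map_id = new_map.get_map_id()

kml = ('<Placemark><name>Cafe</name>'
       '<Point><coordinates>10.75,59.91</coordinates></Point></Placemark>')
client.add_feature(map_id, 'Cafe', kml)

# Fetch the feature feed back and list the titles.
feed = client.get_features(map_id)
for feature in feed.entry:
  print feature.title.text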
125
python/gdata/maps/data.py
Normal file
@@ -0,0 +1,125 @@
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""Data model classes for parsing and generating XML for the Maps Data API."""


__author__ = 'api.roman.public@google.com (Roman Nurik)'


import re
import atom.core
import gdata.data


MAP_ATOM_ID_PATTERN = re.compile('/maps/feeds/maps/'
                                 '(?P<user_id>\w+)/'
                                 '(?P<map_id>\w+)$')

FEATURE_ATOM_ID_PATTERN = re.compile('/maps/feeds/features/'
                                     '(?P<user_id>\w+)/'
                                     '(?P<map_id>\w+)/'
                                     '(?P<feature_id>\w+)$')

# The KML mime type
KML_CONTENT_TYPE = 'application/vnd.google-earth.kml+xml'

# The OGC KML 2.2 namespace
KML_NAMESPACE = 'http://www.opengis.net/kml/2.2'

class MapsDataEntry(gdata.data.GDEntry):
  """Adds convenience methods inherited by all Maps Data entries."""

  def get_user_id(self):
    """Extracts the user ID of this entry."""
    if self.id.text:
      match = self.__class__.atom_id_pattern.search(self.id.text)
      if match:
        return match.group('user_id')
    return None

  GetUserId = get_user_id

  def get_map_id(self):
    """Extracts the map ID of this entry."""
    if self.id.text:
      match = self.__class__.atom_id_pattern.search(self.id.text)
      if match:
        return match.group('map_id')
    return None

  GetMapId = get_map_id


class Map(MapsDataEntry):
  """Represents a map which belongs to the user."""
  atom_id_pattern = MAP_ATOM_ID_PATTERN


class MapFeed(gdata.data.GDFeed):
  """Represents an atom feed of maps."""
  entry = [Map]


class KmlContent(atom.data.Content):
  """Represents an atom content element that encapsulates KML content."""

  def __init__(self, **kwargs):
    super(KmlContent, self).__init__(type=KML_CONTENT_TYPE, **kwargs)
    if 'kml' in kwargs:
      self.kml = kwargs['kml']

  def _get_kml(self):
    if self.children:
      return self.children[0]
    else:
      return ''

  def _set_kml(self, kml):
    if not kml:
      self.children = []
      return

    if type(kml) == str:
      kml = atom.core.parse(kml)
      if not kml.namespace:
        kml.namespace = KML_NAMESPACE

    self.children = [kml]

  kml = property(_get_kml, _set_kml)


class Feature(MapsDataEntry):
  """Represents a single feature in a map."""
  atom_id_pattern = FEATURE_ATOM_ID_PATTERN
  content = KmlContent

  def get_feature_id(self):
    """Extracts the feature ID of this feature."""
    if self.id.text:
      match = self.__class__.atom_id_pattern.search(self.id.text)
      if match:
        return match.group('feature_id')
    return None

  GetFeatureId = get_feature_id


class FeatureFeed(gdata.data.GDFeed):
  """Represents an atom feed of features."""
  entry = [Feature]
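To make the data model above concrete, here is a small sketch of how KmlContent and the ID-extraction helpers behave; the atom ID shown is made up for illustration.

import atom.data
import gdata.maps.data

# Wrap raw KML in a KmlContent element; the kml property parses the string
# and stamps the OGC KML namespace on it if it is missing.
content = gdata.maps.data.KmlContent(
    kml='<Placemark><name>Office</name></Placemark>')

feature = gdata.maps.data.Feature(
    title=atom.data.Title(text='Office', type='text'),
    content=content)

# get_user_id/get_map_id/get_feature_id simply run the compiled patterns
# against the entry's atom ID. A hypothetical ID such as
#   http://maps.google.com/maps/feeds/features/someuser/abc123/def456
# would yield 'someuser', 'abc123' and 'def456' respectively.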
355
python/gdata/media/__init__.py
Normal file
@@ -0,0 +1,355 @@
# -*-*- encoding: utf-8 -*-*-
|
||||
#
|
||||
# This is gdata.photos.media, implementing parts of the MediaRSS spec in gdata structures
|
||||
#
|
||||
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
|
||||
#
|
||||
# Copyright 2007 Håvard Gulldahl
|
||||
# Portions copyright 2007 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Essential attributes of photos in Google Photos/Picasa Web Albums are
|
||||
expressed using elements from the `media' namespace, defined in the
|
||||
MediaRSS specification[1].
|
||||
|
||||
Due to copyright issues, the elements herein are documented sparingly; please
consult the Google Photos API Reference Guide[2] or, alternatively, the
official MediaRSS specification[1] for details.
(If there is a version conflict between the two sources, stick to the
Google Photos API.)
|
||||
|
||||
[1]: http://search.yahoo.com/mrss (version 1.1.1)
|
||||
[2]: http://code.google.com/apis/picasaweb/reference.html#media_reference
|
||||
|
||||
Keep in mind that Google Photos only uses a subset of the MediaRSS elements
|
||||
(and some of the attributes are trimmed down, too):
|
||||
|
||||
media:content
|
||||
media:credit
|
||||
media:description
|
||||
media:group
|
||||
media:keywords
|
||||
media:thumbnail
|
||||
media:title
|
||||
"""
|
||||
|
||||
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__
|
||||
__license__ = 'Apache License v2'
|
||||
|
||||
|
||||
import atom
|
||||
import gdata
|
||||
|
||||
MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/'
|
||||
YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007'
|
||||
|
||||
|
||||
class MediaBaseElement(atom.AtomBase):
|
||||
"""Base class for elements in the MEDIA_NAMESPACE.
|
||||
To add new elements, you only need to add the element tag name to self._tag
|
||||
"""
|
||||
|
||||
_tag = ''
|
||||
_namespace = MEDIA_NAMESPACE
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
|
||||
def __init__(self, name=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
self.name = name
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class Content(MediaBaseElement):
|
||||
"""(attribute container) This element describes the original content,
|
||||
e.g. an image or a video. There may be multiple Content elements
|
||||
in a media:Group.
|
||||
|
||||
For example, a video may have a
|
||||
<media:content medium="image"> element that specifies a JPEG
|
||||
representation of the video, and a <media:content medium="video">
|
||||
element that specifies the URL of the video itself.
|
||||
|
||||
Attributes:
|
||||
    url: non-ambiguous reference to the online object
|
||||
width: width of the object frame, in pixels
|
||||
    height: height of the object frame, in pixels
|
||||
medium: one of `image' or `video', allowing the api user to quickly
|
||||
determine the object's type
|
||||
type: Internet media Type[1] (a.k.a. mime type) of the object -- a more
|
||||
verbose way of determining the media type. To set the type member
|
||||
      in the constructor, use the content_type parameter.
|
||||
(optional) fileSize: the size of the object, in bytes
|
||||
|
||||
[1]: http://en.wikipedia.org/wiki/Internet_media_type
|
||||
"""
|
||||
|
||||
_tag = 'content'
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['url'] = 'url'
|
||||
_attributes['width'] = 'width'
|
||||
_attributes['height'] = 'height'
|
||||
_attributes['medium'] = 'medium'
|
||||
_attributes['type'] = 'type'
|
||||
_attributes['fileSize'] = 'fileSize'
|
||||
|
||||
def __init__(self, url=None, width=None, height=None,
|
||||
medium=None, content_type=None, fileSize=None, format=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
MediaBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
self.url = url
|
||||
self.width = width
|
||||
self.height = height
|
||||
self.medium = medium
|
||||
self.type = content_type
|
||||
self.fileSize = fileSize
|
||||
|
||||
|
||||
def ContentFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Content, xml_string)
|
||||
|
||||
|
||||
class Credit(MediaBaseElement):
|
||||
"""(string) Contains the nickname of the user who created the content,
|
||||
e.g. `Liz Bennet'.
|
||||
|
||||
This is a user-specified value that should be used when referring to
|
||||
the user by name.
|
||||
|
||||
Note that none of the attributes from the MediaRSS spec are supported.
|
||||
"""
|
||||
|
||||
_tag = 'credit'
|
||||
|
||||
|
||||
def CreditFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Credit, xml_string)
|
||||
|
||||
|
||||
class Description(MediaBaseElement):
|
||||
"""(string) A description of the media object.
|
||||
Either plain unicode text, or entity-encoded html (look at the `type'
|
||||
attribute).
|
||||
|
||||
E.g `A set of photographs I took while vacationing in Italy.'
|
||||
|
||||
For `api' projections, the description is in plain text;
|
||||
for `base' projections, the description is in HTML.
|
||||
|
||||
Attributes:
|
||||
    type: either `text' or `html'. To set the type member in the constructor,
|
||||
use the description_type parameter.
|
||||
"""
|
||||
|
||||
_tag = 'description'
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['type'] = 'type'
|
||||
def __init__(self, description_type=None,
|
||||
extension_elements=None, extension_attributes=None, text=None):
|
||||
MediaBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
|
||||
self.type = description_type
|
||||
|
||||
|
||||
def DescriptionFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Description, xml_string)
|
||||
|
||||
|
||||
class Keywords(MediaBaseElement):
|
||||
"""(string) Lists the tags associated with the entry,
|
||||
e.g `italy, vacation, sunset'.
|
||||
|
||||
Contains a comma-separated list of tags that have been added to the photo, or
|
||||
all tags that have been added to photos in the album.
|
||||
"""
|
||||
|
||||
_tag = 'keywords'
|
||||
|
||||
|
||||
def KeywordsFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Keywords, xml_string)
|
||||
|
||||
|
||||
class Thumbnail(MediaBaseElement):
|
||||
"""(attributes) Contains the URL of a thumbnail of a photo or album cover.
|
||||
|
||||
There can be multiple <media:thumbnail> elements for a given <media:group>;
|
||||
for example, a given item may have multiple thumbnails at different sizes.
|
||||
Photos generally have two thumbnails at different sizes;
|
||||
albums generally have one cropped thumbnail.
|
||||
|
||||
  If the thumbsize parameter is set in the initial query, this element points
  to thumbnails of the requested sizes; otherwise the thumbnails are at the
  default thumbnail size.
|
||||
|
||||
This element must not be confused with the <gphoto:thumbnail> element.
|
||||
|
||||
Attributes:
|
||||
url: The URL of the thumbnail image.
|
||||
height: The height of the thumbnail image, in pixels.
|
||||
width: The width of the thumbnail image, in pixels.
|
||||
"""
|
||||
|
||||
_tag = 'thumbnail'
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['url'] = 'url'
|
||||
_attributes['width'] = 'width'
|
||||
_attributes['height'] = 'height'
|
||||
def __init__(self, url=None, width=None, height=None,
|
||||
extension_attributes=None, text=None, extension_elements=None):
|
||||
MediaBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
self.url = url
|
||||
self.width = width
|
||||
self.height = height
|
||||
|
||||
|
||||
def ThumbnailFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Thumbnail, xml_string)
|
||||
|
||||
|
||||
class Title(MediaBaseElement):
|
||||
"""(string) Contains the title of the entry's media content, in plain text.
|
||||
|
||||
Attributes:
|
||||
type: Always set to plain. To set the type member in the constructor, use
|
||||
the title_type parameter.
|
||||
"""
|
||||
|
||||
_tag = 'title'
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['type'] = 'type'
|
||||
def __init__(self, title_type=None,
|
||||
extension_attributes=None, text=None, extension_elements=None):
|
||||
MediaBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
self.type = title_type
|
||||
|
||||
|
||||
def TitleFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Title, xml_string)
|
||||
|
||||
|
||||
class Player(MediaBaseElement):
|
||||
"""(string) Contains the embeddable player URL for the entry's media content
|
||||
if the media is a video.
|
||||
|
||||
Attributes:
|
||||
    url: The URL of the embeddable player.
|
||||
"""
|
||||
|
||||
_tag = 'player'
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['url'] = 'url'
|
||||
|
||||
def __init__(self, player_url=None,
|
||||
extension_attributes=None, extension_elements=None):
|
||||
MediaBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes)
|
||||
    self.url = player_url
|
||||
|
||||
|
||||
class Private(atom.AtomBase):
|
||||
"""The YouTube Private element"""
|
||||
_tag = 'private'
|
||||
_namespace = YOUTUBE_NAMESPACE
|
||||
|
||||
|
||||
class Duration(atom.AtomBase):
|
||||
"""The YouTube Duration element"""
|
||||
_tag = 'duration'
|
||||
_namespace = YOUTUBE_NAMESPACE
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['seconds'] = 'seconds'
|
||||
|
||||
|
||||
class Category(MediaBaseElement):
|
||||
"""The mediagroup:category element"""
|
||||
|
||||
_tag = 'category'
|
||||
_attributes = atom.AtomBase._attributes.copy()
|
||||
_attributes['term'] = 'term'
|
||||
_attributes['scheme'] = 'scheme'
|
||||
_attributes['label'] = 'label'
|
||||
|
||||
def __init__(self, term=None, scheme=None, label=None, text=None,
|
||||
extension_elements=None, extension_attributes=None):
|
||||
"""Constructor for Category
|
||||
|
||||
Args:
|
||||
term: str
|
||||
scheme: str
|
||||
label: str
|
||||
text: str The text data in the this element
|
||||
extension_elements: list A list of ExtensionElement instances
|
||||
extension_attributes: dict A dictionary of attribute value string pairs
|
||||
"""
|
||||
|
||||
self.term = term
|
||||
self.scheme = scheme
|
||||
self.label = label
|
||||
self.text = text
|
||||
self.extension_elements = extension_elements or []
|
||||
self.extension_attributes = extension_attributes or {}
|
||||
|
||||
|
||||
class Group(MediaBaseElement):
|
||||
"""Container element for all media elements.
|
||||
The <media:group> element can appear as a child of an album, photo or
|
||||
video entry."""
|
||||
|
||||
_tag = 'group'
|
||||
_children = atom.AtomBase._children.copy()
|
||||
_children['{%s}content' % MEDIA_NAMESPACE] = ('content', [Content,])
|
||||
_children['{%s}credit' % MEDIA_NAMESPACE] = ('credit', Credit)
|
||||
_children['{%s}description' % MEDIA_NAMESPACE] = ('description', Description)
|
||||
_children['{%s}keywords' % MEDIA_NAMESPACE] = ('keywords', Keywords)
|
||||
_children['{%s}thumbnail' % MEDIA_NAMESPACE] = ('thumbnail', [Thumbnail,])
|
||||
_children['{%s}title' % MEDIA_NAMESPACE] = ('title', Title)
|
||||
_children['{%s}category' % MEDIA_NAMESPACE] = ('category', [Category,])
|
||||
_children['{%s}duration' % YOUTUBE_NAMESPACE] = ('duration', Duration)
|
||||
_children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private)
|
||||
_children['{%s}player' % MEDIA_NAMESPACE] = ('player', Player)
|
||||
|
||||
def __init__(self, content=None, credit=None, description=None, keywords=None,
|
||||
thumbnail=None, title=None, duration=None, private=None,
|
||||
category=None, player=None, extension_elements=None,
|
||||
extension_attributes=None, text=None):
|
||||
|
||||
MediaBaseElement.__init__(self, extension_elements=extension_elements,
|
||||
extension_attributes=extension_attributes,
|
||||
text=text)
|
||||
self.content=content
|
||||
self.credit=credit
|
||||
self.description=description
|
||||
self.keywords=keywords
|
||||
self.thumbnail=thumbnail or []
|
||||
self.title=title
|
||||
self.duration=duration
|
||||
self.private=private
|
||||
self.category=category or []
|
||||
self.player=player
|
||||
|
||||
|
||||
def GroupFromString(xml_string):
|
||||
return atom.CreateClassFromXMLString(Group, xml_string)
|
||||
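Since these classes are mostly declarative, a short round-trip sketch may help; the sample media:group XML below is invented for illustration.

import gdata.media

MEDIA_XML = """<media:group xmlns:media='http://search.yahoo.com/mrss/'>
  <media:title type='plain'>Trip to Italy</media:title>
  <media:keywords>italy, vacation, sunset</media:keywords>
  <media:thumbnail url='http://example.com/thumb.jpg' width='72' height='48'/>
</media:group>"""

# Parse the XML string into the classes defined above.
group = gdata.media.GroupFromString(MEDIA_XML)
print group.title.text          # 'Trip to Italy'
print group.keywords.text       # 'italy, vacation, sunset'
print group.thumbnail[0].url    # 'http://example.com/thumb.jpg'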
159
python/gdata/media/data.py
Normal file
@@ -0,0 +1,159 @@
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains the data classes of the Yahoo! Media RSS Extension"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
|
||||
|
||||
MEDIA_TEMPLATE = '{http://search.yahoo.com/mrss//}%s'
|
||||
|
||||
|
||||
class MediaCategory(atom.core.XmlElement):
|
||||
"""Describes a media category."""
|
||||
_qname = MEDIA_TEMPLATE % 'category'
|
||||
scheme = 'scheme'
|
||||
label = 'label'
|
||||
|
||||
|
||||
class MediaCopyright(atom.core.XmlElement):
|
||||
"""Describes a media copyright."""
|
||||
_qname = MEDIA_TEMPLATE % 'copyright'
|
||||
url = 'url'
|
||||
|
||||
|
||||
class MediaCredit(atom.core.XmlElement):
|
||||
"""Describes a media credit."""
|
||||
_qname = MEDIA_TEMPLATE % 'credit'
|
||||
role = 'role'
|
||||
scheme = 'scheme'
|
||||
|
||||
|
||||
class MediaDescription(atom.core.XmlElement):
|
||||
"""Describes a media description."""
|
||||
_qname = MEDIA_TEMPLATE % 'description'
|
||||
type = 'type'
|
||||
|
||||
|
||||
class MediaHash(atom.core.XmlElement):
|
||||
"""Describes a media hash."""
|
||||
_qname = MEDIA_TEMPLATE % 'hash'
|
||||
algo = 'algo'
|
||||
|
||||
|
||||
class MediaKeywords(atom.core.XmlElement):
|
||||
"""Describes a media keywords."""
|
||||
_qname = MEDIA_TEMPLATE % 'keywords'
|
||||
|
||||
|
||||
class MediaPlayer(atom.core.XmlElement):
|
||||
"""Describes a media player."""
|
||||
_qname = MEDIA_TEMPLATE % 'player'
|
||||
height = 'height'
|
||||
width = 'width'
|
||||
url = 'url'
|
||||
|
||||
|
||||
class MediaRating(atom.core.XmlElement):
|
||||
"""Describes a media rating."""
|
||||
_qname = MEDIA_TEMPLATE % 'rating'
|
||||
scheme = 'scheme'
|
||||
|
||||
|
||||
class MediaRestriction(atom.core.XmlElement):
|
||||
"""Describes a media restriction."""
|
||||
_qname = MEDIA_TEMPLATE % 'restriction'
|
||||
relationship = 'relationship'
|
||||
type = 'type'
|
||||
|
||||
|
||||
class MediaText(atom.core.XmlElement):
|
||||
"""Describes a media text."""
|
||||
_qname = MEDIA_TEMPLATE % 'text'
|
||||
end = 'end'
|
||||
lang = 'lang'
|
||||
type = 'type'
|
||||
start = 'start'
|
||||
|
||||
|
||||
class MediaThumbnail(atom.core.XmlElement):
|
||||
"""Describes a media thumbnail."""
|
||||
_qname = MEDIA_TEMPLATE % 'thumbnail'
|
||||
time = 'time'
|
||||
url = 'url'
|
||||
width = 'width'
|
||||
height = 'height'
|
||||
|
||||
|
||||
class MediaTitle(atom.core.XmlElement):
|
||||
"""Describes a media title."""
|
||||
_qname = MEDIA_TEMPLATE % 'title'
|
||||
type = 'type'
|
||||
|
||||
|
||||
class MediaContent(atom.core.XmlElement):
|
||||
"""Describes a media content."""
|
||||
_qname = MEDIA_TEMPLATE % 'content'
|
||||
bitrate = 'bitrate'
|
||||
is_default = 'isDefault'
|
||||
medium = 'medium'
|
||||
height = 'height'
|
||||
credit = [MediaCredit]
|
||||
language = 'language'
|
||||
hash = MediaHash
|
||||
width = 'width'
|
||||
player = MediaPlayer
|
||||
url = 'url'
|
||||
file_size = 'fileSize'
|
||||
channels = 'channels'
|
||||
expression = 'expression'
|
||||
text = [MediaText]
|
||||
samplingrate = 'samplingrate'
|
||||
title = MediaTitle
|
||||
category = [MediaCategory]
|
||||
rating = [MediaRating]
|
||||
type = 'type'
|
||||
description = MediaDescription
|
||||
framerate = 'framerate'
|
||||
thumbnail = [MediaThumbnail]
|
||||
duration = 'duration'
|
||||
copyright = MediaCopyright
|
||||
keywords = MediaKeywords
|
||||
restriction = [MediaRestriction]
|
||||
|
||||
|
||||
class MediaGroup(atom.core.XmlElement):
|
||||
"""Describes a media group."""
|
||||
_qname = MEDIA_TEMPLATE % 'group'
|
||||
credit = [MediaCredit]
|
||||
content = [MediaContent]
|
||||
copyright = MediaCopyright
|
||||
description = MediaDescription
|
||||
category = [MediaCategory]
|
||||
player = MediaPlayer
|
||||
rating = [MediaRating]
|
||||
hash = MediaHash
|
||||
title = MediaTitle
|
||||
keywords = MediaKeywords
|
||||
restriction = [MediaRestriction]
|
||||
thumbnail = [MediaThumbnail]
|
||||
text = [MediaText]
|
||||
|
||||
|
||||
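A brief construction sketch for these atom.core-based classes; it relies on the keyword-argument construction and to_string() serialization that atom.core.XmlElement subclasses use elsewhere in this diff, and the URL is a placeholder.

import gdata.media.data

group = gdata.media.data.MediaGroup(
    title=gdata.media.data.MediaTitle(text='Holiday album'),
    content=[gdata.media.data.MediaContent(
        url='http://example.com/photo.jpg', medium='image')])

# Serializes the group with its media:title and media:content children.
print group.to_string()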
15
python/gdata/notebook/__init__.py
Normal file
@@ -0,0 +1,15 @@
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
55
python/gdata/notebook/data.py
Normal file
@@ -0,0 +1,55 @@
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains the data classes of the Google Notebook Data API"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
import atom.data
|
||||
import gdata.data
|
||||
import gdata.opensearch.data
|
||||
|
||||
|
||||
NB_TEMPLATE = '{http://schemas.google.com/notes/2008/}%s'
|
||||
|
||||
|
||||
class ComesAfter(atom.core.XmlElement):
|
||||
"""Preceding element."""
|
||||
_qname = NB_TEMPLATE % 'comesAfter'
|
||||
id = 'id'
|
||||
|
||||
|
||||
class NoteEntry(gdata.data.GDEntry):
|
||||
"""Describes a note entry in the feed of a user's notebook."""
|
||||
|
||||
|
||||
class NotebookFeed(gdata.data.GDFeed):
|
||||
"""Describes a notebook feed."""
|
||||
entry = [NoteEntry]
|
||||
|
||||
|
||||
class NotebookListEntry(gdata.data.GDEntry):
|
||||
"""Describes a note list entry in the feed of a user's list of public notebooks."""
|
||||
|
||||
|
||||
class NotebookListFeed(gdata.data.GDFeed):
|
||||
"""Describes a notebook list feed."""
|
||||
entry = [NotebookListEntry]
|
||||
|
||||
|
||||
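A short illustration of how NB_TEMPLATE and the feed classes fit together; no notebook client class appears in this diff, so the generic GDClient and the feed URL below are assumptions for the sketch.

import gdata.client
import gdata.notebook.data

# NB_TEMPLATE expands each tag into a fully qualified element name, e.g.
#   '{http://schemas.google.com/notes/2008/}comesAfter'
print gdata.notebook.data.ComesAfter._qname

# Any GDClient can fetch a notebook feed by passing the desired class
# (hypothetical, unauthenticated feed URL shown).
client = gdata.client.GDClient()
feed = client.get_feed('http://www.google.com/notebook/feeds/user/default',
                       desired_class=gdata.notebook.data.NotebookFeed)
for note in feed.entry:
  print note.title.text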
529
python/gdata/oauth/__init__.py
Normal file
@@ -0,0 +1,529 @@
import cgi
|
||||
import urllib
|
||||
import time
|
||||
import random
|
||||
import urlparse
|
||||
import hmac
|
||||
import binascii
|
||||
|
||||
VERSION = '1.0' # Hi Blaine!
|
||||
HTTP_METHOD = 'GET'
|
||||
SIGNATURE_METHOD = 'PLAINTEXT'
|
||||
|
||||
# Generic exception class
|
||||
class OAuthError(RuntimeError):
|
||||
  def __init__(self, message='OAuth error occurred.'):
|
||||
self.message = message
|
||||
|
||||
# optional WWW-Authenticate header (401 error)
|
||||
def build_authenticate_header(realm=''):
|
||||
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
|
||||
|
||||
# url escape
|
||||
def escape(s):
|
||||
# escape '/' too
|
||||
return urllib.quote(s, safe='~')
|
||||
|
||||
# util function: current timestamp
|
||||
# seconds since epoch (UTC)
|
||||
def generate_timestamp():
|
||||
return int(time.time())
|
||||
|
||||
# util function: nonce
|
||||
# pseudorandom number
|
||||
def generate_nonce(length=8):
|
||||
return ''.join([str(random.randint(0, 9)) for i in range(length)])
|
||||
|
||||
# OAuthConsumer is a data type that represents the identity of the Consumer
|
||||
# via its shared secret with the Service Provider.
|
||||
class OAuthConsumer(object):
|
||||
key = None
|
||||
secret = None
|
||||
|
||||
def __init__(self, key, secret):
|
||||
self.key = key
|
||||
self.secret = secret
|
||||
|
||||
# OAuthToken is a data type that represents an End User via either an access
|
||||
# or request token.
|
||||
class OAuthToken(object):
|
||||
# access tokens and request tokens
|
||||
key = None
|
||||
secret = None
|
||||
|
||||
'''
|
||||
key = the token
|
||||
secret = the token secret
|
||||
'''
|
||||
def __init__(self, key, secret):
|
||||
self.key = key
|
||||
self.secret = secret
|
||||
|
||||
def to_string(self):
|
||||
return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})
|
||||
|
||||
# return a token from something like:
|
||||
# oauth_token_secret=digg&oauth_token=digg
|
||||
def from_string(s):
|
||||
params = cgi.parse_qs(s, keep_blank_values=False)
|
||||
key = params['oauth_token'][0]
|
||||
secret = params['oauth_token_secret'][0]
|
||||
return OAuthToken(key, secret)
|
||||
from_string = staticmethod(from_string)
|
||||
|
||||
def __str__(self):
|
||||
return self.to_string()
|
||||
|
||||
# OAuthRequest represents the request and can be serialized
|
||||
class OAuthRequest(object):
|
||||
'''
|
||||
OAuth parameters:
|
||||
- oauth_consumer_key
|
||||
- oauth_token
|
||||
- oauth_signature_method
|
||||
- oauth_signature
|
||||
- oauth_timestamp
|
||||
- oauth_nonce
|
||||
- oauth_version
|
||||
... any additional parameters, as defined by the Service Provider.
|
||||
'''
|
||||
parameters = None # oauth parameters
|
||||
http_method = HTTP_METHOD
|
||||
http_url = None
|
||||
version = VERSION
|
||||
|
||||
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
|
||||
self.http_method = http_method
|
||||
self.http_url = http_url
|
||||
self.parameters = parameters or {}
|
||||
|
||||
def set_parameter(self, parameter, value):
|
||||
self.parameters[parameter] = value
|
||||
|
||||
def get_parameter(self, parameter):
|
||||
try:
|
||||
return self.parameters[parameter]
|
||||
except:
|
||||
raise OAuthError('Parameter not found: %s' % parameter)
|
||||
|
||||
def _get_timestamp_nonce(self):
|
||||
return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
|
||||
|
||||
# get any non-oauth parameters
|
||||
def get_nonoauth_parameters(self):
|
||||
parameters = {}
|
||||
for k, v in self.parameters.iteritems():
|
||||
# ignore oauth parameters
|
||||
if k.find('oauth_') < 0:
|
||||
parameters[k] = v
|
||||
return parameters
|
||||
|
||||
# serialize as a header for an HTTPAuth request
|
||||
def to_header(self, realm=''):
|
||||
auth_header = 'OAuth realm="%s"' % realm
|
||||
# add the oauth parameters
|
||||
if self.parameters:
|
||||
for k, v in self.parameters.iteritems():
|
||||
if k[:6] == 'oauth_':
|
||||
auth_header += ', %s="%s"' % (k, escape(str(v)))
|
||||
return {'Authorization': auth_header}
|
||||
|
||||
# serialize as post data for a POST request
|
||||
def to_postdata(self):
|
||||
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()])
|
||||
|
||||
# serialize as a url for a GET request
|
||||
def to_url(self):
|
||||
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
|
||||
|
||||
# return a string that consists of all the parameters that need to be signed
|
||||
def get_normalized_parameters(self):
|
||||
params = self.parameters
|
||||
try:
|
||||
# exclude the signature if it exists
|
||||
del params['oauth_signature']
|
||||
except:
|
||||
pass
|
||||
key_values = params.items()
|
||||
# sort lexicographically, first after key, then after value
|
||||
key_values.sort()
|
||||
# combine key value pairs in string and escape
|
||||
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values])
|
||||
|
||||
# just uppercases the http method
|
||||
def get_normalized_http_method(self):
|
||||
return self.http_method.upper()
|
||||
|
||||
# parses the url and rebuilds it to be scheme://host/path
|
||||
def get_normalized_http_url(self):
|
||||
parts = urlparse.urlparse(self.http_url)
|
||||
host = parts[1].lower()
|
||||
if host.endswith(':80') or host.endswith(':443'):
|
||||
host = host.split(':')[0]
|
||||
url_string = '%s://%s%s' % (parts[0], host, parts[2]) # scheme, netloc, path
|
||||
return url_string
|
||||
|
||||
# set the signature parameter to the result of build_signature
|
||||
def sign_request(self, signature_method, consumer, token):
|
||||
# set the signature method
|
||||
self.set_parameter('oauth_signature_method', signature_method.get_name())
|
||||
# set the signature
|
||||
self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
|
||||
|
||||
def build_signature(self, signature_method, consumer, token):
|
||||
# call the build signature method within the signature method
|
||||
return signature_method.build_signature(self, consumer, token)
|
||||
|
||||
def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
|
||||
# combine multiple parameter sources
|
||||
if parameters is None:
|
||||
parameters = {}
|
||||
|
||||
# headers
|
||||
if headers and 'Authorization' in headers:
|
||||
auth_header = headers['Authorization']
|
||||
# check that the authorization header is OAuth
|
||||
      # use find() so a non-OAuth Authorization header is skipped instead of
      # str.index raising ValueError when the substring is absent
      if auth_header.find('OAuth') > -1:
|
||||
try:
|
||||
# get the parameters from the header
|
||||
header_params = OAuthRequest._split_header(auth_header)
|
||||
parameters.update(header_params)
|
||||
except:
|
||||
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
|
||||
|
||||
# GET or POST query string
|
||||
if query_string:
|
||||
query_params = OAuthRequest._split_url_string(query_string)
|
||||
parameters.update(query_params)
|
||||
|
||||
# URL parameters
|
||||
param_str = urlparse.urlparse(http_url)[4] # query
|
||||
url_params = OAuthRequest._split_url_string(param_str)
|
||||
parameters.update(url_params)
|
||||
|
||||
if parameters:
|
||||
return OAuthRequest(http_method, http_url, parameters)
|
||||
|
||||
return None
|
||||
from_request = staticmethod(from_request)
|
||||
|
||||
def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
|
||||
if not parameters:
|
||||
parameters = {}
|
||||
|
||||
defaults = {
|
||||
'oauth_consumer_key': oauth_consumer.key,
|
||||
'oauth_timestamp': generate_timestamp(),
|
||||
'oauth_nonce': generate_nonce(),
|
||||
'oauth_version': OAuthRequest.version,
|
||||
}
|
||||
|
||||
defaults.update(parameters)
|
||||
parameters = defaults
|
||||
|
||||
if token:
|
||||
parameters['oauth_token'] = token.key
|
||||
|
||||
return OAuthRequest(http_method, http_url, parameters)
|
||||
from_consumer_and_token = staticmethod(from_consumer_and_token)
|
||||
|
||||
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
|
||||
if not parameters:
|
||||
parameters = {}
|
||||
|
||||
parameters['oauth_token'] = token.key
|
||||
|
||||
if callback:
|
||||
parameters['oauth_callback'] = callback
|
||||
|
||||
return OAuthRequest(http_method, http_url, parameters)
|
||||
from_token_and_callback = staticmethod(from_token_and_callback)
|
||||
|
||||
# util function: turn Authorization: header into parameters, has to do some unescaping
|
||||
def _split_header(header):
|
||||
params = {}
|
||||
parts = header[6:].split(',')
|
||||
for param in parts:
|
||||
# ignore realm parameter
|
||||
if param.find('realm') > -1:
|
||||
continue
|
||||
# remove whitespace
|
||||
param = param.strip()
|
||||
# split key-value
|
||||
param_parts = param.split('=', 1)
|
||||
# remove quotes and unescape the value
|
||||
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
|
||||
return params
|
||||
_split_header = staticmethod(_split_header)
|
||||
|
||||
# util function: turn url string into parameters, has to do some unescaping
|
||||
# even empty values should be included
|
||||
def _split_url_string(param_str):
|
||||
parameters = cgi.parse_qs(param_str, keep_blank_values=True)
|
||||
for k, v in parameters.iteritems():
|
||||
parameters[k] = urllib.unquote(v[0])
|
||||
return parameters
|
||||
_split_url_string = staticmethod(_split_url_string)
|
||||
|
||||
# OAuthServer is a worker to check a requests validity against a data store
|
||||
class OAuthServer(object):
|
||||
timestamp_threshold = 300 # in seconds, five minutes
|
||||
version = VERSION
|
||||
signature_methods = None
|
||||
data_store = None
|
||||
|
||||
def __init__(self, data_store=None, signature_methods=None):
|
||||
self.data_store = data_store
|
||||
self.signature_methods = signature_methods or {}
|
||||
|
||||
def set_data_store(self, oauth_data_store):
|
||||
self.data_store = oauth_data_store
|
||||
|
||||
def get_data_store(self):
|
||||
return self.data_store
|
||||
|
||||
def add_signature_method(self, signature_method):
|
||||
self.signature_methods[signature_method.get_name()] = signature_method
|
||||
return self.signature_methods
|
||||
|
||||
# process a request_token request
|
||||
# returns the request token on success
|
||||
def fetch_request_token(self, oauth_request):
|
||||
try:
|
||||
# get the request token for authorization
|
||||
token = self._get_token(oauth_request, 'request')
|
||||
except OAuthError:
|
||||
# no token required for the initial token request
|
||||
version = self._get_version(oauth_request)
|
||||
consumer = self._get_consumer(oauth_request)
|
||||
self._check_signature(oauth_request, consumer, None)
|
||||
# fetch a new token
|
||||
token = self.data_store.fetch_request_token(consumer)
|
||||
return token
|
||||
|
||||
# process an access_token request
|
||||
# returns the access token on success
|
||||
def fetch_access_token(self, oauth_request):
|
||||
version = self._get_version(oauth_request)
|
||||
consumer = self._get_consumer(oauth_request)
|
||||
# get the request token
|
||||
token = self._get_token(oauth_request, 'request')
|
||||
self._check_signature(oauth_request, consumer, token)
|
||||
new_token = self.data_store.fetch_access_token(consumer, token)
|
||||
return new_token
|
||||
|
||||
# verify an api call, checks all the parameters
|
||||
def verify_request(self, oauth_request):
|
||||
# -> consumer and token
|
||||
version = self._get_version(oauth_request)
|
||||
consumer = self._get_consumer(oauth_request)
|
||||
# get the access token
|
||||
token = self._get_token(oauth_request, 'access')
|
||||
self._check_signature(oauth_request, consumer, token)
|
||||
parameters = oauth_request.get_nonoauth_parameters()
|
||||
return consumer, token, parameters
|
||||
|
||||
# authorize a request token
|
||||
def authorize_token(self, token, user):
|
||||
return self.data_store.authorize_request_token(token, user)
|
||||
|
||||
# get the callback url
|
||||
def get_callback(self, oauth_request):
|
||||
return oauth_request.get_parameter('oauth_callback')
|
||||
|
||||
# optional support for the authenticate header
|
||||
def build_authenticate_header(self, realm=''):
|
||||
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
|
||||
|
||||
# verify the correct version request for this server
|
||||
def _get_version(self, oauth_request):
|
||||
try:
|
||||
version = oauth_request.get_parameter('oauth_version')
|
||||
except:
|
||||
version = VERSION
|
||||
if version and version != self.version:
|
||||
raise OAuthError('OAuth version %s not supported.' % str(version))
|
||||
return version
|
||||
|
||||
# figure out the signature with some defaults
|
||||
def _get_signature_method(self, oauth_request):
|
||||
try:
|
||||
signature_method = oauth_request.get_parameter('oauth_signature_method')
|
||||
except:
|
||||
signature_method = SIGNATURE_METHOD
|
||||
try:
|
||||
# get the signature method object
|
||||
signature_method = self.signature_methods[signature_method]
|
||||
except:
|
||||
signature_method_names = ', '.join(self.signature_methods.keys())
|
||||
      raise OAuthError('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
|
||||
|
||||
return signature_method
|
||||
|
||||
def _get_consumer(self, oauth_request):
|
||||
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
|
||||
if not consumer_key:
|
||||
raise OAuthError('Invalid consumer key.')
|
||||
consumer = self.data_store.lookup_consumer(consumer_key)
|
||||
if not consumer:
|
||||
raise OAuthError('Invalid consumer.')
|
||||
return consumer
|
||||
|
||||
# try to find the token for the provided request token key
|
||||
def _get_token(self, oauth_request, token_type='access'):
|
||||
token_field = oauth_request.get_parameter('oauth_token')
|
||||
consumer = self._get_consumer(oauth_request)
|
||||
token = self.data_store.lookup_token(consumer, token_type, token_field)
|
||||
if not token:
|
||||
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
|
||||
return token
|
||||
|
||||
def _check_signature(self, oauth_request, consumer, token):
|
||||
timestamp, nonce = oauth_request._get_timestamp_nonce()
|
||||
self._check_timestamp(timestamp)
|
||||
self._check_nonce(consumer, token, nonce)
|
||||
signature_method = self._get_signature_method(oauth_request)
|
||||
try:
|
||||
signature = oauth_request.get_parameter('oauth_signature')
|
||||
except:
|
||||
raise OAuthError('Missing signature.')
|
||||
# validate the signature
|
||||
valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
|
||||
if not valid_sig:
|
||||
key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
|
||||
raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
|
||||
built = signature_method.build_signature(oauth_request, consumer, token)
|
||||
|
||||
def _check_timestamp(self, timestamp):
|
||||
# verify that timestamp is recentish
|
||||
timestamp = int(timestamp)
|
||||
now = int(time.time())
|
||||
lapsed = now - timestamp
|
||||
if lapsed > self.timestamp_threshold:
|
||||
raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))
|
||||
|
||||
def _check_nonce(self, consumer, token, nonce):
|
||||
# verify that the nonce is uniqueish
|
||||
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
|
||||
if nonce:
|
||||
raise OAuthError('Nonce already used: %s' % str(nonce))
|
||||
|
||||
# OAuthClient is a worker to attempt to execute a request
|
||||
class OAuthClient(object):
|
||||
consumer = None
|
||||
token = None
|
||||
|
||||
def __init__(self, oauth_consumer, oauth_token):
|
||||
self.consumer = oauth_consumer
|
||||
self.token = oauth_token
|
||||
|
||||
def get_consumer(self):
|
||||
return self.consumer
|
||||
|
||||
def get_token(self):
|
||||
return self.token
|
||||
|
||||
def fetch_request_token(self, oauth_request):
|
||||
# -> OAuthToken
|
||||
raise NotImplementedError
|
||||
|
||||
def fetch_access_token(self, oauth_request):
|
||||
# -> OAuthToken
|
||||
raise NotImplementedError
|
||||
|
||||
def access_resource(self, oauth_request):
|
||||
# -> some protected resource
|
||||
raise NotImplementedError
|
||||
|
||||
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
|
||||
class OAuthDataStore(object):
|
||||
|
||||
def lookup_consumer(self, key):
|
||||
# -> OAuthConsumer
|
||||
raise NotImplementedError
|
||||
|
||||
def lookup_token(self, oauth_consumer, token_type, token_token):
|
||||
# -> OAuthToken
|
||||
raise NotImplementedError
|
||||
|
||||
def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
|
||||
# -> OAuthToken
|
||||
raise NotImplementedError
|
||||
|
||||
def fetch_request_token(self, oauth_consumer):
|
||||
# -> OAuthToken
|
||||
raise NotImplementedError
|
||||
|
||||
def fetch_access_token(self, oauth_consumer, oauth_token):
|
||||
# -> OAuthToken
|
||||
raise NotImplementedError
|
||||
|
||||
def authorize_request_token(self, oauth_token, user):
|
||||
# -> OAuthToken
|
||||
raise NotImplementedError
|
||||
|
||||
# OAuthSignatureMethod is a strategy class that implements a signature method
|
||||
class OAuthSignatureMethod(object):
|
||||
def get_name(self):
|
||||
# -> str
|
||||
raise NotImplementedError
|
||||
|
||||
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
|
||||
# -> str key, str raw
|
||||
raise NotImplementedError
|
||||
|
||||
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
|
||||
# -> str
|
||||
raise NotImplementedError
|
||||
|
||||
def check_signature(self, oauth_request, consumer, token, signature):
|
||||
built = self.build_signature(oauth_request, consumer, token)
|
||||
return built == signature
|
||||
|
||||
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
|
||||
|
||||
def get_name(self):
|
||||
return 'HMAC-SHA1'
|
||||
|
||||
def build_signature_base_string(self, oauth_request, consumer, token):
|
||||
sig = (
|
||||
escape(oauth_request.get_normalized_http_method()),
|
||||
escape(oauth_request.get_normalized_http_url()),
|
||||
escape(oauth_request.get_normalized_parameters()),
|
||||
)
|
||||
|
||||
key = '%s&' % escape(consumer.secret)
|
||||
if token:
|
||||
key += escape(token.secret)
|
||||
raw = '&'.join(sig)
|
||||
return key, raw
|
||||
|
||||
def build_signature(self, oauth_request, consumer, token):
|
||||
# build the base signature string
|
||||
key, raw = self.build_signature_base_string(oauth_request, consumer, token)
|
||||
|
||||
# hmac object
|
||||
try:
|
||||
import hashlib # 2.5
|
||||
hashed = hmac.new(key, raw, hashlib.sha1)
|
||||
except:
|
||||
import sha # deprecated
|
||||
hashed = hmac.new(key, raw, sha)
|
||||
|
||||
# calculate the digest base 64
|
||||
return binascii.b2a_base64(hashed.digest())[:-1]
|
||||
|
||||
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
|
||||
|
||||
def get_name(self):
|
||||
return 'PLAINTEXT'
|
||||
|
||||
def build_signature_base_string(self, oauth_request, consumer, token):
|
||||
# concatenate the consumer key and secret
|
||||
sig = escape(consumer.secret) + '&'
|
||||
if token:
|
||||
sig = sig + escape(token.secret)
|
||||
return sig
|
||||
|
||||
def build_signature(self, oauth_request, consumer, token):
|
||||
return self.build_signature_base_string(oauth_request, consumer, token)
|
||||
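To tie the pieces of this module together, here is a minimal signing sketch that uses only the classes defined above; the consumer key/secret, token and URL are placeholders.

import gdata.oauth as oauth

consumer = oauth.OAuthConsumer('example-consumer-key', 'example-secret')
token = oauth.OAuthToken('request-token', 'request-token-secret')

request = oauth.OAuthRequest.from_consumer_and_token(
    consumer, token=token, http_method='GET',
    http_url='http://example.com/photos', parameters={'size': 'original'})

# Attach oauth_signature_method and oauth_signature to the request.
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)

# The signed request can then be serialized in any of the three supported ways.
print request.to_header()    # {'Authorization': 'OAuth realm="", oauth_...'}
print request.to_postdata()  # urlencoded body for a POST
print request.to_url()       # signed GET URL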
120
python/gdata/oauth/rsa.py
Normal file
@@ -0,0 +1,120 @@
#!/usr/bin/python
|
||||
|
||||
"""
|
||||
requires tlslite - http://trevp.net/tlslite/
|
||||
|
||||
"""
|
||||
|
||||
import base64  # needed by check_signature's base64.b64decode call below
import binascii
|
||||
|
||||
from gdata.tlslite.utils import keyfactory
|
||||
from gdata.tlslite.utils import cryptomath
|
||||
|
||||
# XXX andy: ugly local import due to module name, oauth.oauth
|
||||
import gdata.oauth as oauth
|
||||
|
||||
class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod):
|
||||
def get_name(self):
|
||||
return "RSA-SHA1"
|
||||
|
||||
def _fetch_public_cert(self, oauth_request):
|
||||
# not implemented yet, ideas are:
|
||||
# (1) do a lookup in a table of trusted certs keyed off of consumer
|
||||
# (2) fetch via http using a url provided by the requester
|
||||
# (3) some sort of specific discovery code based on request
|
||||
#
|
||||
# either way should return a string representation of the certificate
|
||||
raise NotImplementedError
|
||||
|
||||
def _fetch_private_cert(self, oauth_request):
|
||||
# not implemented yet, ideas are:
|
||||
# (1) do a lookup in a table of trusted certs keyed off of consumer
|
||||
#
|
||||
# either way should return a string representation of the certificate
|
||||
raise NotImplementedError
|
||||
|
||||
def build_signature_base_string(self, oauth_request, consumer, token):
|
||||
sig = (
|
||||
oauth.escape(oauth_request.get_normalized_http_method()),
|
||||
oauth.escape(oauth_request.get_normalized_http_url()),
|
||||
oauth.escape(oauth_request.get_normalized_parameters()),
|
||||
)
|
||||
key = ''
|
||||
raw = '&'.join(sig)
|
||||
return key, raw
|
||||
|
||||
def build_signature(self, oauth_request, consumer, token):
|
||||
key, base_string = self.build_signature_base_string(oauth_request,
|
||||
consumer,
|
||||
token)
|
||||
|
||||
# Fetch the private key cert based on the request
|
||||
cert = self._fetch_private_cert(oauth_request)
|
||||
|
||||
# Pull the private key from the certificate
|
||||
privatekey = keyfactory.parsePrivateKey(cert)
|
||||
|
||||
# Convert base_string to bytes
|
||||
#base_string_bytes = cryptomath.createByteArraySequence(base_string)
|
||||
|
||||
# Sign using the key
|
||||
signed = privatekey.hashAndSign(base_string)
|
||||
|
||||
return binascii.b2a_base64(signed)[:-1]
|
||||
|
||||
def check_signature(self, oauth_request, consumer, token, signature):
|
||||
    decoded_sig = base64.b64decode(signature)
|
||||
|
||||
key, base_string = self.build_signature_base_string(oauth_request,
|
||||
consumer,
|
||||
token)
|
||||
|
||||
# Fetch the public key cert based on the request
|
||||
cert = self._fetch_public_cert(oauth_request)
|
||||
|
||||
# Pull the public key from the certificate
|
||||
publickey = keyfactory.parsePEMKey(cert, public=True)
|
||||
|
||||
# Check the signature
|
||||
ok = publickey.hashAndVerify(decoded_sig, base_string)
|
||||
|
||||
return ok
|
||||
|
||||
|
||||
class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1):
|
||||
def _fetch_public_cert(self, oauth_request):
|
||||
cert = """
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
|
||||
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
|
||||
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
|
||||
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
|
||||
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
|
||||
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
|
||||
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
|
||||
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
|
||||
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
|
||||
-----END CERTIFICATE-----
|
||||
"""
|
||||
return cert
|
||||
|
||||
def _fetch_private_cert(self, oauth_request):
|
||||
cert = """
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
|
||||
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
|
||||
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
|
||||
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
|
||||
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
|
||||
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
|
||||
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
|
||||
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
|
||||
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
|
||||
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
|
||||
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
|
||||
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
|
||||
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
|
||||
Lw03eHTNQghS0A==
|
||||
-----END PRIVATE KEY-----
|
||||
"""
|
||||
return cert
|
||||
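The TestOAuthSignatureMethod_RSA_SHA1 class above exists so the RSA flow can be exercised without any key-distribution infrastructure. A minimal sketch, assuming the bundled gdata.tlslite can parse the embedded test key pair; consumer key and URL are placeholders.

import gdata.oauth as oauth
import gdata.oauth.rsa as oauth_rsa

consumer = oauth.OAuthConsumer('example-consumer-key', 'unused-secret')
request = oauth.OAuthRequest.from_consumer_and_token(
    consumer, http_method='GET', http_url='http://example.com/photos')

# Sign with the embedded test private key, then verify against the test cert.
method = oauth_rsa.TestOAuthSignatureMethod_RSA_SHA1()
request.sign_request(method, consumer, None)
signature = request.get_parameter('oauth_signature')
print method.check_signature(request, consumer, None, signature)  # should print True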
15
python/gdata/opensearch/__init__.py
Normal file
@@ -0,0 +1,15 @@
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
48
python/gdata/opensearch/data.py
Normal file
@@ -0,0 +1,48 @@
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2009 Google Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Contains the data classes of the OpenSearch Extension"""
|
||||
|
||||
|
||||
__author__ = 'j.s@google.com (Jeff Scudder)'
|
||||
|
||||
|
||||
import atom.core
|
||||
|
||||
|
||||
OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0//}%s'
|
||||
OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1//}%s'
|
||||
|
||||
|
||||
class ItemsPerPage(atom.core.XmlElement):
|
||||
"""Describes the number of items that will be returned per page for paged feeds"""
|
||||
_qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage',
|
||||
OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage')
|
||||
|
||||
|
||||
class StartIndex(atom.core.XmlElement):
|
||||
"""Describes the starting index of the contained entries for paged feeds"""
|
||||
_qname = (OPENSEARCH_TEMPLATE_V1 % 'startIndex',
|
||||
OPENSEARCH_TEMPLATE_V2 % 'startIndex')
|
||||
|
||||
|
||||
class TotalResults(atom.core.XmlElement):
|
||||
"""Describes the total number of results associated with this feed"""
|
||||
_qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults',
|
||||
OPENSEARCH_TEMPLATE_V2 % 'totalResults')
|
||||
|
||||
|
||||
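These three elements normally arrive as children of a feed rather than being built by hand; a small sketch of both directions (the commented line assumes a previously fetched gdata.data.GDFeed named feed exposing the usual paging members, which is not shown in this diff).

import gdata.opensearch.data

# Reading paging metadata from a fetched feed (assumed to exist):
# print feed.total_results.text, feed.start_index.text, feed.items_per_page.text

# Constructing an element directly; the numeric value lives in the text node.
per_page = gdata.opensearch.data.ItemsPerPage(text='25')
print per_page.to_string()  # serializes as an opensearch itemsPerPage element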
1112
python/gdata/photos/__init__.py
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.