confidence

parent 939dba20ee
commit 05274b4f0b
@@ -390,7 +390,7 @@ class LPLS(Model):
         """
         aopt = self.model['aopt']
         if opt['calc_conf']:
-            Wx, Wz = lpls_jk(self.model['X'], self.model['Y'], self.model['Z'], aopt, n_sets)
+            Wx, Wz = lpls_jk(self._data['X'], self._data['Y'], self._data['Z'], aopt, opt['n_sets'], opt['xz_alpha'])
            Wcal = self.model['W'][:,:aopt]
            Lcal = self.model['L'][:,:aopt]
            # ensure that Wcal is scaled
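Note on the hunk above: the confidence branch now reads the raw matrices from self._data and forwards the user-configurable opt['n_sets'] and opt['xz_alpha'] to lpls_jk. A minimal sketch of the underlying jackknife idea, written as present-day Python/NumPy rather than the module's Python 2 style; all names are illustrative, not the project's API:

    # Illustrative only: refit on data with one segment of samples held out,
    # collect the loading estimates, and use their spread as a confidence cue.
    import numpy as np

    def jackknife_segments(n_rows, n_sets):
        """Yield row indices with one contiguous segment left out at a time."""
        folds = np.array_split(np.arange(n_rows), n_sets)
        for fold in folds:
            yield np.setdiff1d(np.arange(n_rows), fold)

    rng = np.random.default_rng(0)
    X = rng.standard_normal((21, 5))
    W_per_segment = []
    for keep in jackknife_segments(X.shape[0], n_sets=7):
        # stand-in for refitting the bilinear model on the reduced data
        _, _, Vt = np.linalg.svd(X[keep], full_matrices=False)
        W_per_segment.append(Vt.T[:, :2])        # first two "loadings"
    W_per_segment = np.array(W_per_segment)      # shape (n_sets, n_vars, aopt)
    print(W_per_segment.std(axis=0))             # spread across segments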
@@ -669,8 +669,8 @@ class LplsOptions(Options):
         opt['center'] = True
         opt['center_mth'] = [2, 0, 1]
         opt['scale'] = 'scores'
-        opt['calc_conf'] = False
-        opt['n_sets'] = 7
+        opt['calc_conf'] = True
+        opt['n_sets'] = 20
         opt['strict'] = False
         opt['p_center'] = 'med'
         opt['alpha'] = .3
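The defaults above turn jackknife confidence estimation on (calc_conf = True) and raise the number of segments from 7 to 20. A rough, hypothetical sketch of how such a dict-style options object is typically consumed (not the actual LplsOptions/Options classes; the xz_alpha default below is assumed, only the key name comes from the first hunk):

    # Hypothetical stand-in for the dict-backed options pattern used above.
    class OptionsSketch(dict):
        def __init__(self):
            super().__init__()
            self['calc_conf'] = True    # run the jackknife confidence step
            self['n_sets'] = 20         # number of jackknife/CV segments
            self['xz_alpha'] = .5       # assumed default, mirrors the lpls_jk call above

    opt = OptionsSketch()
    if opt['calc_conf']:
        print("would call lpls_jk with %d segments" % opt['n_sets'])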
@@ -698,8 +698,8 @@ class LplsOptions(Options):
            (blmplots.LplsHypoidCorrelationPlot, 'Hypoid corr.', False)
            ]

-        opt['out_data'] = ['T','P']
-        opt['out_plots'] = [blmplots.PlsScorePlot,blmplots.PlsLoadingPlot,blmplots.LineViewXc]
+        opt['out_data'] = ['T','P', 'tsqx']
+        opt['out_plots'] = [blmplots.PlsScorePlot,blmplots.LplsXLoadingPlot,blmplots.LplsZLoadingPlot, blmplots.LineViewXc]

         #opt['out_data'] = None

@@ -190,6 +190,15 @@ class PlsCorrelationLoadingPlot(BlmScatterPlot):
         title = "Pls correlation loadings (%s)" %model._dataset['X'].get_name()
         BlmScatterPlot.__init__(self, title, model, absi, ordi, part_name='CP')

+class LplsXLoadingPlot(BlmScatterPlot):
+    def __init__(self, model, absi=0, ordi=1):
+        title = "Lpls x-loadings (%s)" %model._dataset['X'].get_name()
+        BlmScatterPlot.__init__(self, title, model, absi, ordi, part_name='P', color_by='tsqx')
+
+class LplsZLoadingPlot(BlmScatterPlot):
+    def __init__(self, model, absi=0, ordi=1):
+        title = "Lpls z-loadings (%s)" %model._dataset['Z'].get_name()
+        BlmScatterPlot.__init__(self, title, model, absi, ordi, part_name='L', color_by='tsqz')

 class LplsHypoidCorrelationPlot(BlmScatterPlot):
     def __init__(self, model, absi=0, ordi=1):
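The two new classes mirror the existing scatter-plot wrappers: they plot the X-loadings ('P') and Z-loadings ('L') and colour the points by what look like Hotelling-type statistics ('tsqx'/'tsqz'). A self-contained matplotlib sketch of the same idea; the real classes delegate to BlmScatterPlot, whose API is not shown in this diff:

    # Illustrative matplotlib version of an "x-loadings coloured by tsqx" plot.
    import numpy as np
    import matplotlib.pyplot as plt

    def plot_loadings(P, tsq, absi=0, ordi=1, title="Lpls x-loadings"):
        fig, ax = plt.subplots()
        sc = ax.scatter(P[:, absi], P[:, ordi], c=tsq, cmap="viridis")
        ax.set_xlabel("component %d" % (absi + 1))
        ax.set_ylabel("component %d" % (ordi + 1))
        ax.set_title(title)
        fig.colorbar(sc, label="t^2 (colour scale)")
        return fig

    rng = np.random.default_rng(1)
    P = rng.standard_normal((50, 3))     # fake loadings: 50 variables x 3 comps
    tsqx = (P ** 2).sum(axis=1)          # fake per-variable statistic
    plot_loadings(P, tsqx)
    plt.show()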
@@ -219,7 +219,7 @@ def bridge(a, b, aopt, scale='scores', mode='normal', r=0):



-def nipals_lpls(X, Y, Z, amax, alpha=.7, mean_ctr=[2, 0, 1], mode='normal', scale='scores', verbose=False):
+def nipals_lpls(X, Y, Z, a_max, alpha=.7, mean_ctr=[2, 0, 1], mode='normal', scale='scores', verbose=False):
     """ L-shaped Partial Least Sqaures Regression by the nipals algorithm.

     (X!Z)->Y
@@ -260,18 +260,18 @@ def nipals_lpls(X, Y, Z, amax, alpha=.7, mean_ctr=[2, 0, 1], mode='normal', scal
     u, o = Z.shape

     # initialize
-    U = empty((k, amax))
-    Q = empty((l, amax))
-    T = empty((m, amax))
-    W = empty((n, amax))
-    P = empty((n, amax))
-    K = empty((o, amax))
-    L = empty((u, amax))
-    var_x = empty((amax,))
-    var_y = empty((amax,))
-    var_z = empty((amax,))
+    U = empty((k, a_max))
+    Q = empty((l, a_max))
+    T = empty((m, a_max))
+    W = empty((n, a_max))
+    P = empty((n, a_max))
+    K = empty((o, a_max))
+    L = empty((u, a_max))
+    var_x = empty((a_max,))
+    var_y = empty((a_max,))
+    var_z = empty((a_max,))

-    for a in range(amax):
+    for a in range(a_max):
         if verbose:
             print "\n Working on comp. %s" %a
         u = Y[:,:1]
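The rename amax -> a_max only touches the component-count argument; the block above preallocates one column per component for every score/loading matrix. A compact equivalent of that preallocation for reference (shapes taken from the diff, dimensions below are made up):

    # Same preallocation expressed as a dict comprehension (illustrative).
    from numpy import empty

    def allocate_lpls_arrays(m, n, k, l, u, o, a_max):
        shapes = {'T': (m, a_max), 'W': (n, a_max), 'P': (n, a_max),   # X side
                  'U': (k, a_max), 'Q': (l, a_max),                    # Y side
                  'K': (o, a_max), 'L': (u, a_max),                    # Z side
                  'var_x': (a_max,), 'var_y': (a_max,), 'var_z': (a_max,)}
        return {name: empty(shape) for name, shape in shapes.items()}

    arrays = allocate_lpls_arrays(m=30, n=100, k=30, l=3, u=40, o=100, a_max=5)
    print(arrays['T'].shape)   # (30, 5)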
@@ -6,7 +6,7 @@ from scipy.stats import median
 from scipy.linalg import triu,inv,svd,norm

 from select_generators import w_pls_gen,w_pls_gen_jk,pls_gen,pca_gen,diag_pert
-from engines import w_simpls,pls,bridge,pca
+from engines import w_simpls,pls,bridge,pca,nipals_lpls
 from cx_utils import m_shape

 def w_pls_cv_val(X, Y, amax, n_blocks=None, algo='simpls'):
@@ -111,15 +111,17 @@ def pls_val(X, Y, amax=2, n_blocks=10, algo='pls', metric=None):

 def lpls_val(X, Y, Z, a_max=2, nsets=None,alpha=.5):
     """Performs crossvalidation to get generalisation error in lpls"""
-    cv_iter = select_generators.pls_gen(X, Y, n_blocks=nsets,center=False,index_out=True)
+    cv_iter = pls_gen(X, Y, n_blocks=nsets,center=False,index_out=True)
     k, l = Y.shape
     Yhat = empty((a_max,k,l), 'd')
     for i, (xcal,xi,ycal,yi,ind) in enumerate(cv_iter):
-        T, W, P, Q, U, L, K, B, b0, evx, evy, evz = nipals_lpls(xcal,ycal,Z,
+        dat = nipals_lpls(xcal,ycal,Z,
                           a_max=a_max,
                           alpha=alpha,
                           mean_ctr=[2,0,1],
                           verbose=False)
+        B = dat['B']
+        b0 = dat['b0']
         for a in range(a_max):
             Yhat[a,ind,:] = b0[a][0][0] + dot(xi, B[a])
     Yhat_class = zeros_like(Yhat)
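The validation loop now receives a dict from nipals_lpls and pulls the regression coefficients out by name instead of unpacking a twelve-element tuple. A small stub showing the same access pattern; the stub engine below is invented, only the key names 'B' and 'b0' come from the diff:

    # Stub engine returning named results; mirrors how lpls_val reads 'B' and 'b0'.
    import numpy as np

    def stub_engine(X, Y, a_max):
        n, l = X.shape[1], Y.shape[1]
        return {'B': [np.zeros((n, l)) for _ in range(a_max)],                    # coeffs per comp.
                'b0': [np.atleast_2d(Y.mean(axis=0)) for _ in range(a_max)]}      # intercepts

    rng = np.random.default_rng(2)
    X, Y = rng.random((10, 4)), rng.random((10, 2))
    dat = stub_engine(X, Y, a_max=3)
    B, b0 = dat['B'], dat['b0']
    for a in range(3):
        Yhat_a = b0[a][0][0] + X.dot(B[a])   # same indexing as in the diff
    print(Yhat_a.shape)                       # (10, 2)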
@@ -135,9 +137,6 @@ def lpls_val(X, Y, Z, a_max=2, nsets=None,alpha=.5):
 def pca_alter_val(a, amax, n_sets=10, method='diag'):
     """Pca validation by altering elements in X.
-
-
-

    comments:
    -- may do all jk estimates in this loop
    """
@@ -250,7 +249,7 @@ def pca_jkP(a, aopt, n_blocks=None, metric=None):


 def lpls_jk(X, Y, Z, a_max, nsets=None, alpha=.5):
-    cv_iter = select_generators.pls_gen(X, Y, n_blocks=nsets,center=False,index_out=False)
+    cv_iter = pls_gen(X, Y, n_blocks=nsets,center=False,index_out=False)
     m, n = X.shape
     k, l = Y.shape
     o, p = Z.shape
@@ -260,15 +259,11 @@ def lpls_jk(X, Y, Z, a_max, nsets=None, alpha=.5):
     WWz = empty((nsets, o, a_max), 'd')
     #WWy = empty((nsets, l, a_max), 'd')
     for i, (xcal,xi,ycal,yi) in enumerate(cv_iter):
-        T, W, P, Q, U, L, K, B, b0, evx, evy, evz = nipals_lpls(xcal,ycal,Z,
-                          a_max=a_max,
-                          alpha=alpha,
-                          mean_ctr=[2,0,1],
-                          scale='loads',
-                          verbose=False)
-        WWx[i,:,:] = W
-        WWz[i,:,:] = L
-        #WWy[i,:,:] = Q
+        dat = nipals_lpls(xcal,ycal,Z,a_max=a_max,alpha=alpha,
+                          mean_ctr=[2,0,1],scale='loads',verbose=False)
+        WWx[i,:,:] = dat['W']
+        WWz[i,:,:] = dat['L']
+        #WWy[i,:,:] = dat['Q']

     return WWx, WWz

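After this loop, WWx and WWz hold one set of X- and Z-loadings per jackknife segment. One common way to turn that spread into a per-loading uncertainty is the jackknife standard error, sketched here; this is illustrative, the diff itself stops at returning the raw segment loadings:

    # Jackknife standard error over segment-wise loadings (illustrative).
    import numpy as np

    rng = np.random.default_rng(3)
    nsets, n_vars, a_max = 20, 100, 3
    WWx = rng.standard_normal((nsets, n_vars, a_max))   # stand-in for collected dat['W']

    w_mean = WWx.mean(axis=0)                                        # (n_vars, a_max)
    w_se = np.sqrt((nsets - 1) / nsets * ((WWx - w_mean) ** 2).sum(axis=0))
    print(w_se.shape)                                                # (100, 3)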
@@ -1,44 +0,0 @@
-
-def smdb_annot(orflist=None, input_fname='registry.genenames.tab', output_fname='yeast.annot'):
-
-    """Reads registry.genenames.tab from the Stanford yeast
-    microarray database.
-
-    Available from:
-    ftp://genome-ftp.stanford.edu/pub/yeast/data_download/gene_registry/registry.genenames.tab
-
-    input: orf -- list of orfs (open reading frames)
-           file -- (optional) file to fetch info from
-
-    registry.genames contains:
-
-    0 = Locus name
-    1 = Other name
-    2 = Description
-    3 = Gene product
-    4 = Phenotype
-    5 = ORF name
-    6 = SGDID
-
-    NB! Other name, Gene product and Phenotype may have more
-    than one mapping. These are separated by |
-
-    Output: writes an annotation file
-
-    """
-    outfile = open(output_fname, 'w')
-    header = "Orf\tLocus_id\tOther_name\tDescription\tGene_product\tPhenotype\tSGD_ID\n"
-    outfile.write(header)
-    text = open(input_fname, 'r').read().splitlines()
-    for line in text:
-        els = line.split('\t')
-        orf_name = els.pop(5)
-        if orf_name!='': # we dont care about non-named orfs
-            if orflist and orf_name not in orflist:
-                break
-            for e in els:
-                if e !='':
-                    outfile.write(str(e) + "\t")
-                else:
-                    outfile.write("NA")
-            f.write("\n")