Projects/laydi
Projects
/
laydi
Archived
7
0
Fork 0
This repository has been archived on 2024-07-04. You can view files and clone it, but cannot push or open issues or pull requests.
laydi/scripts/lpls/run_smoker.py

655 lines
20 KiB
Python
Raw Normal View History

2008-02-08 15:58:46 +01:00
import sys,time,cPickle
2007-07-20 11:36:26 +02:00
import rpy
2007-11-07 13:34:13 +01:00
from pylab import gca, figure, subplot,plot
2007-07-20 11:36:26 +02:00
from scipy import *
2007-11-07 13:34:13 +01:00
from scipy.linalg import norm
from lpls import correlation_loadings
2007-07-20 11:36:26 +02:00
import rpy_go
sys.path.append("../../fluents") # home of dataset
sys.path.append("../../fluents/lib") # home of cx_stats
2007-11-07 13:34:13 +01:00
sys.path.append("/home/flatberg/fluents/scripts/lpls")
2008-02-08 15:58:46 +01:00
sys.path.append("/home/flatberg/pyblm/")
2007-07-20 11:36:26 +02:00
import dataset
import cx_stats
2008-02-08 15:58:46 +01:00
import pyblm
from pyblm.engines import nipals_lpls, pls
from pyblm.crossvalidation import lpls_val, lpls_jk
from pyblm.statistics import pls_qvals
2007-11-07 13:34:13 +01:00
from plots_lpls import plot_corrloads, plot_dag
import plots_lpls
2008-02-08 15:58:46 +01:00
def iqr(X, axis=0):
    """Interquartile range (Q3 - Q1) of X along the given axis."""
    q3 = apply_along_axis(lambda col: stats.scoreatpercentile(col, 75), axis, X)
    q1 = apply_along_axis(lambda col: stats.scoreatpercentile(col, 25), axis, X)
    return q3 - q1
# Possible outliers
# http://www.pubmedcentral.nih.gov/articlerender.fcgi?tool=pubmed&pubmedid=16817967
# NCI-60 cell lines flagged as potential outliers in the literature.
# They are kept here for reference only; nothing below removes them.
sample_outliers = ['OV:NCI_ADR_RES', 'CNS:SF_295', 'CNS:SF_539', 'RE:SN12C', 'LC:NCI_H226', 'LC:NCI_H522', 'PR:PC_3', 'PR:DU_145']
outlier = 'ME:LOXIMVI' # 19
####### OPTIONS ###########
# data

# One options branch per dataset; each branch sets the same group of
# module-level switches used throughout the rest of the script.
chip = "hgu133a"
use_data = 'uma'
#use_data = 'scherf'
#use_data = 'uma'
if use_data == 'scherf':
    data_cached = False            # reuse cached Y.ftsv/Z.ftsv from disk?
    use_saved_plsr_result = False  # reuse plsr_index.pkl gene selection?
    subset = 'plsr'                # gene prefilter: 'plsr', 'iqr' or other
    small_test = False             # restrict to 20 genes for a quick run
    use_sbg_subset = True # the sandberg nci-Ygroups subset
    std_y = False
    std_z = False
    # go
    ontology = "bp"
    min_genes = 5                  # min. genes a GO term must annotate
    similarities = ("JiangConrath","Resnik","Lin","CoutoEnriched","CoutoJiangConrath","CoutoResnik","CoutoLin")
    meth = similarities[2]         # term-term similarity method ("Lin")
    go_term_sim = "OA"
    # lpls
    a_max = 10                     # max number of components
    aopt = 4
    aopt = 2 # doubling-time
    xz_alpha = .5                  # X/Z balance in the L-PLS fit
    w_alpha = .3
    center_axis = [2, 0, 2]
    zorth = True
    nsets = None
    qval_cutoff = 0.1
    n_iter = 50                    # permutation iterations for q-values
    alpha_check = True
    calc_rmsep = True
    bevel_check = False
    save_calc = True
elif use_data == 'uma':
    data_cached = False
    use_saved_plsr_result = False
    subset = 'iqr'
    small_test = False
    use_sbg_subset = True # the sandberg nci-Ygroups subset
    std_y = False
    std_z = False
    # go
    ontology = "bp"
    min_genes = 5
    similarities = ("JiangConrath","Resnik","Lin","CoutoEnriched","CoutoJiangConrath","CoutoResnik","CoutoLin")
    meth = similarities[2]
    go_term_sim = "OA"
    # lpls
    a_max = 10
    aopt = 5
    xz_alpha = .5
    w_alpha = .3
    center_axis = [2, 0, 2]
    zorth = True
    nsets = None
    qval_cutoff = 0.01
    n_iter = 50
    alpha_check = True
    calc_rmsep = True
    bevel_check = False
    save_calc = True
elif use_data == 'smoker':
    data_cached = False
    use_saved_plsr_result = False
    #subset = 'plsr'
    subset = 'plsr'
    small_test = False
    use_sbg_subset = False # the sandberg nci-Ygroups subset
    std_y = False
    std_z = False
    # go
    ontology = "bp"
    min_genes = 5
    similarities = ("JiangConrath","Resnik","Lin","CoutoEnriched","CoutoJiangConrath","CoutoResnik","CoutoLin")
    meth = similarities[2]
    go_term_sim = "OA"
    # lpls
    a_max = 5
    aopt = 2
    xz_alpha = .5
    w_alpha = .3
    center_axis = [2, 0, 2]
    zorth = True
    nsets = None
    qval_cutoff = 0.01
    n_iter = 50
    alpha_check = True
    calc_rmsep = True
    bevel_check = False
    save_calc = True
else:
    raise ValueError
print "Using options for : " + use_data
2007-07-20 11:36:26 +02:00
######## DATA ##########
2007-08-21 12:25:23 +02:00
if use_data=='smoker':
# full smoker data
2008-02-08 15:58:46 +01:00
DX = dataset.read_ftsv(open("/home/flatberg/datasets/smokers/full/Smokers.ftsv"))
DY = dataset.read_ftsv(open("/home/flatberg/datasets/smokers/full/Yg.ftsv"))
DYr = dataset.read_ftsv(open("/home/flatberg/datasets/smokers/full/Ypy.ftsv"))
Y = DYr.asarray().astype('d')
2007-08-21 12:25:23 +02:00
gene_ids = DX.get_identifiers('gene_ids', sorted=True)
2008-02-08 15:58:46 +01:00
sample_ids = DY.get_identifiers('_patient', sorted=True)
2007-08-21 12:25:23 +02:00
elif use_data=='scherf':
2008-02-08 15:58:46 +01:00
print "hepp"
#DX = dataset.read_ftsv(open("../../data/scherf/old_data/scherfX.ftsv"))
#DY = dataset.read_ftsv(open("../../data/scherf/old_data/scherfY.ftsv"))
DX = dataset.read_ftsv(open("nci60/X5964.ftsv", "r"))
DYg = dataset.read_ftsv(open("../../data/uma/Yg133.ftsv"))
DYr = dataset.read_ftsv(open("../../data/uma/Yd.ftsv"))
Y = DYg.asarray().astype('d')
DY = DYg.copy()
Yg = Y
Yr = DYr.asarray().astype('d')
X = DX.asarray()
2007-08-21 12:25:23 +02:00
gene_ids = DX.get_identifiers('gene_ids', sorted=True)
2008-02-08 15:58:46 +01:00
sample_ids = DY.get_identifiers('cline', sorted=True)
2007-08-21 12:25:23 +02:00
elif use_data=='staunton':
pass
2008-02-08 15:58:46 +01:00
2007-08-21 12:25:23 +02:00
elif use_data=='uma':
2008-02-08 15:58:46 +01:00
DX = dataset.read_ftsv(open("/home/flatberg/datasets/uma/X133.ftsv"))
DYg = dataset.read_ftsv(open("/home/flatberg/datasets/uma/Yg133.ftsv"))
DYr = dataset.read_ftsv(open("/home/flatberg/datasets/uma/Yd.ftsv"))
X = DX.asarray()
Y = DYg.asarray().astype('d')
DY = DYg.copy()
Yg = Y
Yr = DYr.asarray().astype('d')
2007-08-21 12:25:23 +02:00
gene_ids = DX.get_identifiers('gene_ids', sorted=True)
2008-02-08 15:58:46 +01:00
sample_ids = DY.get_identifiers('cline', sorted=True)
else:
print "use_data argument: (%s) not valid" %use_method
2007-08-21 12:25:23 +02:00
2008-02-08 15:58:46 +01:00
# Optionally reduce the NCI data to the "sandberg" subset of cancer
# groups and drop Y columns that become (near) empty after row filtering.
if use_sbg_subset and use_data in ['uma', 'scherf', 'staunton']:
    print "Using sbg subset of cancers"
    Y = Yg
    Y_old = Y.copy()
    Yr_old = Yr.copy()
    X_old = X.copy()
    keep_samples = ['CN', 'ME', 'LE', 'CO', 'RE']   # cancer-group prefixes to keep
    #keep_samples = ['CN', 'ME', 'LE', 'CO', 'RE']
    sample_ids_original = DY.get_identifiers('cline', sorted=True)
    sample_ids= [i for i in sample_ids if i[:2] in keep_samples]
    rows_ind = [i for i,name in enumerate(sample_ids_original) if name[:2] in keep_samples]
    # take out rows in X,Y
    X = X[rows_ind,:]
    Y = Y[rows_ind,:]
    Yr = Yr[rows_ind,:]
    # identify redundant columns in Y
    cols_ind = where(Y.sum(0)>1)[0]
    Y = Y[:, cols_ind]
    # create new datasets with updated idents
    cat_ids = [name for i,name in enumerate(DYg.get_identifiers('_cancer', sorted=True)) if i in cols_ind]
    DX = dataset.Dataset(X, [['cline', sample_ids], ['gene_ids', gene_ids]], name='Dxr')
    DYg = dataset.CategoryDataset(Y, [['cline', sample_ids], ['_cancer', cat_ids]], name='Dyr')
    DYr = dataset.Dataset(Yr, [['cline', sample_ids], ['_time', ['doubling_time']]], name='Dyrr')
    DY_old = DY.copy()
    DY = DYg
    print "Now there are %d samples in X" %X.shape[0]

# use subset of genes with defined GO-terms
ic_all = 2026006.0 # sum of all ic in BP
max_ic = -log(1/ic_all)
# information-content threshold for GO terms annotating at least
# min_genes genes, normalised by the maximum information content
ic_cutoff = -log(min_genes/ic_all)/max_ic
print "Information cutoff for min %d genes: %.2f" %(min_genes, ic_cutoff)
gene2goterms = rpy_go.goterms_from_gene(gene_ids, ic_cutoff=ic_cutoff)
# Collect the union of GO terms annotating any gene that survived the
# information-content cutoff.
all_terms = set()
for t in gene2goterms.values():
    all_terms.update(t)
terms = list(all_terms)
print "\nNumber of go-terms: %s" %len(terms)

# update genelist
gene_ids = gene2goterms.keys()
print "\nNumber of genes: %s" %len(gene_ids)

# Restrict the expression matrix to the GO-annotated genes.
X = DX.asarray()
index = DX.get_indices('gene_ids', gene_ids)
X = X[:,index]
# Use only subset defined on GO
ontology = 'BP'
print "\n\nFiltering genes by Go terms "
# use subset based on SAM,PLSR or (IQR)
# Prefilter genes either by PLSR permutation q-values or by interquartile
# range; after filtering, the GO term list is rebuilt for the survivors.
if subset=='plsr':
    print "plsr filter on genes"
    if use_saved_plsr_result:
        # reuse the gene index saved by a previous plsr filtering run
        index = cPickle.load(open('plsr_index.pkl'))
        # Subset data
        X = X[:,index]
        gene_ids = [gid for i, gid in enumerate(gene_ids) if i in index]
        print "\nNumber of genes: %s" %len(gene_ids)
        print "\nWorking on subset with %s genes " %len(gene_ids)
        # update valid go-terms
        gene2goterms = rpy_go.goterms_from_gene(gene_ids, ic_cutoff=ic_cutoff)
        all_terms = set()
        for t in gene2goterms.values():
            all_terms.update(t)
        terms = list(all_terms)
        print "\nNumber of go-terms: %s" %len(terms)
        # update genelist
        gene_ids = gene2goterms.keys()
    else:
        print "Initial plsr qvals"
        # permutation-based q-values from an ordinary PLSR on (X, Y)
        xcal_tsq_x, xpert_tsq_x = pyblm.pls_qvals(X, Y, aopt=aopt, n_iter=n_iter, center_axis=[0,0], nsets=None)
        qvals = pyblm.statistics._fdr(xcal_tsq_x, xpert_tsq_x, median)
        # cut off
        #sort_index = qvals.argsort()
        #index = sort_index[:800]
        #qval_cutoff = qvals[sort_index[500]]
        print "Using cuf off: %.2f" %qval_cutoff
        index = where(qvals<qval_cutoff)[0]
        if small_test:
            index = index[:20]
        # Subset data
        X = X[:,index]
        gene_ids = [gid for i, gid in enumerate(gene_ids) if i in index]
        print "\nNumber of genes: %s" %len(gene_ids)
        print "\nWorking on subset with %s genes " %len(gene_ids)
        # update valid go-terms
        gene2goterms = rpy_go.goterms_from_gene(gene_ids, ic_cutoff=ic_cutoff)
        all_terms = set()
        for t in gene2goterms.values():
            all_terms.update(t)
        terms = list(all_terms)
        print "\nNumber of go-terms: %s" %len(terms)
        # update genelist
        gene_ids = gene2goterms.keys()
        print "\nNumber of genes: %s" %len(gene_ids)
elif subset == 'iqr':
    # keep genes whose interquartile range across samples exceeds 1
    iqr_vals = iqr(X)
    index = where(iqr_vals>1)[0]
    X = X[:,index]
    gene_ids = [gid for i, gid in enumerate(gene_ids) if i in index]
    print "\nNumber of genes: %s" %len(gene_ids)
    print "\nWorking on subset with %s genes " %len(gene_ids)

    # update valid go-terms
    gene2goterms = rpy_go.goterms_from_gene(gene_ids, ic_cutoff=ic_cutoff)
    all_terms = set()
    for t in gene2goterms.values():
        all_terms.update(t)
    terms = list(all_terms)
    print "\nNumber of go-terms: %s" %len(terms)
    # update genelist
    gene_ids = gene2goterms.keys()
else:
    # noimp (smoker data is prefiltered)
    print "No prefiltering on data used"
    pass
rpy.r.library("GOSim")
# Go-term similarity matrix

print "Term-term similarity matrix (method = %s)" %meth
print "\nCalculating term-term similarity matrix"

if meth=="CoutoEnriched":
    # enrichment factors required by the CoutoEnriched similarity
    aa = 0
    ba = 0
    rpy.r.setEnrichmentFactors(alpha = aa, beta =ba)
if not data_cached:
    # term-term similarities via GOSim; kept unconverted for reuse in R
    rpytmat = rpy.with_mode(rpy.NO_CONVERSION, rpy.r.getTermSim)(terms, method=meth,verbose=False)
    tmat = rpy.r.assign("haha", rpytmat)
    print "\n Calculating Z matrix"
    # Z: (go-terms x genes) gene-to-term similarity matrix
    Z = rpy_go.genego_sim(gene2goterms,gene_ids,terms,rpytmat,go_term_sim=go_term_sim,term_sim=meth)
    DZ = dataset.Dataset(Z, [['go-terms', terms], ['gene_ids', gene_ids]], name='Dz_'+str(meth))
    # update data (X) matrix
    newind = DX.get_indices('gene_ids', gene_ids)
    Xr = DX.asarray()[:,newind]
    DXr = dataset.Dataset(Xr, [['cline', sample_ids], ['gene_ids', gene_ids]], name='Dxr')
else:
    # reuse Y/Z cached on disk by a previous run
    #DXr = dataset.read_ftsv(open('Xr.ftsv', 'r'))
    newind = DX.get_indices('gene_ids', gene_ids)
    Xr = DX.asarray()[:,newind]
    DXr = dataset.Dataset(Xr, [['cline', sample_ids], ['gene_ids', gene_ids]], name='Dxr')
    DY = dataset.read_ftsv(open('Y.ftsv', 'r'))
    DZ = dataset.read_ftsv(open('Z.ftsv', 'r'))

# plain arrays used from here on
Xr = DXr.asarray()
Y = DY.asarray()
Z = DZ.asarray()
sample_ids = DX.get_identifiers('cline', sorted=True)
# standardize Z?
# Optionally scale the columns of Z and/or Y to unit standard deviation.
sdtz = False
if sdtz:
    # BUG FIX: was `DZ._array/Dz._array.std(0)` -- `Dz` is an undefined
    # name, so enabling sdtz raised NameError. Both operands must be DZ.
    DZ._array = DZ._array/DZ._array.std(0)

sdty = False
if sdty:
    DY._array = DY._array/DY._array.std(0)
# ##### PLS ONLY, CHECK FOR SIMILARITY BETWEEN W and Z #######
# Diagnostic: fit two ordinary PLS models (X->Y and X.T->Z.T) and
# scatter every pair of (X-loading-weight, Z-score) components to see
# whether the Z information is already captured by the X weights.
if bevel_check:
    Xr = DXr.asarray()
    Y = DY.asarray()
    from pylab import figure, scatter, xlabel, subplot,xticks,yticks
    # double-centre X and Z, column-centre Y
    Xrcc = Xr - Xr.mean(0) - Xr.mean(1)[:,newaxis] + Xr.mean()
    Zcc = Z - Z.mean(0) - Z.mean(1)[:,newaxis] + Z.mean()
    Yc = Y - Y.mean(0)
    xy_pls_result = pls(Xrcc, Yc, a_max)
    xz_pls_result = pls(Xrcc.T, Zcc.T, a_max)
    # check for linearity between scores of xz-result and W of xy-result
    Wxy = xy_pls_result['W']
    Txz = xz_pls_result['T']
    figure()
    n = 0
    # a_max x a_max grid of scatter plots, annotated with the correlation
    for i in range(a_max):
        w = Wxy[:,i]
        for j in range(a_max):
            n += 1
            t = Txz[:,j]
            r2 = stats.corrcoef(w, t)[0,-1]
            subplot(a_max, a_max, n)
            scatter(w, t)
            xticks([])
            yticks([])
            xlabel('(Wxy(%d), Tzx(%d)), r2: %.1f ' %(i+1,j+1,r2))
# ####### LPLSR ########
# Persist the (slow to compute) model inputs so later runs can reload
# them instead of recomputing the GO similarities.
if save_calc and not data_cached:
    print "Saving calculations"
    import cPickle
    fh = open("g2go_s.pkl", "w")
    cPickle.dump(gene2goterms, fh)
    fh.close()
    dataset.write_ftsv(open('Xs.ftsv', 'w'), DXr, decimals=7)
    dataset.write_ftsv(open('Ysg.ftsv', 'w'), DY, decimals=7)
    dataset.write_ftsv(open('Yspy.ftsv', 'w'), DYr, decimals=7)
    dataset.write_ftsv(open('Zs.ftsv', 'w'), DZ, decimals=7)
def read_calc():
    """Reload the datasets persisted by the save_calc block above.

    Returns (DXr, DY, DYr, DZ, gene2goterms).
    """
    import cPickle
    fh = open("g2go_s.pkl")
    gene2goterms = cPickle.load(fh)
    fh.close()
    # BUG FIX: previously read Xu/Yu/Ydu/Zu.ftsv, filenames that the
    # save_calc block never writes (it writes Xs/Ysg/Yspy/Zs.ftsv), so
    # a round trip failed. Also pass open file handles, consistent with
    # every other dataset.read_ftsv call in this script.
    # TODO(review): confirm the *u* files were not an intentional
    # alternative input set from another run.
    DXr = dataset.read_ftsv(open('Xs.ftsv'))
    DY = dataset.read_ftsv(open('Ysg.ftsv'))
    DYr = dataset.read_ftsv(open('Yspy.ftsv'))
    DZ = dataset.read_ftsv(open('Zs.ftsv'))
    return DXr, DY, DYr, DZ, gene2goterms
print "LPLSR ..."
# Main L-PLS fit. lpls_result is a dict of model quantities (scores T,
# loadings P/Q/L, weights W/K, explained variances evx/evy/evz, ...).
lpls_result = nipals_lpls(Xr,Y,Z, a_max,alpha=xz_alpha, center_axis=center_axis, zorth=zorth)
# NOTE: dumps every entry of lpls_result (T, P, W, Q, L, K, evx, ...)
# straight into the module namespace; all names below come from there.
globals().update(lpls_result)

# Correlation loadings
dx,Rx,rssx = correlation_loadings(Xr, T, P)
dx,Ry,rssy = correlation_loadings(Y, T, Q)
cadz,Rz,rssz = correlation_loadings(Z.T, W, L)
# Prediction error
if calc_rmsep:
    # cross-validated prediction error at the chosen alpha
    rmsep , yhat, class_error = pyblm.crossvalidation.lpls_val(Xr, Y, Z, a_max, alpha=xz_alpha,center_axis=center_axis, nsets=nsets,zorth=zorth)

# grid of X/Z balance values for the alpha scan below
Alpha = arange(0.0, 1.01, .05)
if alpha_check:
    # prediction error as a function of alpha
    Rmsep,Yhat, CE = [],[],[]
    for a in Alpha:
        print "alpha %f" %a
        rmsep_a , yhat, ce = pyblm.lpls_val(Xr, Y, Z, a_max, alpha=a,
                                            center_axis=center_axis,nsets=nsets,
                                            zorth=zorth)
        Rmsep.append(rmsep_a.copy())
        Yhat.append(yhat.copy())
        CE.append(ce.copy())
    Rmsep = asarray(Rmsep)
    Yhat = asarray(Yhat)
    #CE = asarray(CE)
# Null distribution for the alpha curve: repeat the alpha scan with the
# columns of Z randomly permuted, n_zrand times.
random_alpha_check = True
if random_alpha_check:
    n_zrand = 100           # number of Z column permutations
    RMS,YHAT, CEE = [],[],[]
    zindex = arange(Z.shape[1])
    for ii in range(n_zrand):
        zind_rand = zindex.copy()
        random.shuffle(zind_rand)
        Zrand = Z[:,zind_rand]
        #Alpha = arange(0.0, 1.1, .25)
        Rmsep_r,Yhat_r, CE_r = [],[],[]
        for a in Alpha:
            print "Iter: %d alpha %.2f" %(ii, a)
            rmsep , yhat, ce = pyblm.lpls_val(Xr, Y, Zrand, a_max, alpha=a,center_axis=center_axis,nsets=nsets, zorth=zorth)
            Rmsep_r.append(rmsep.copy())
            Yhat_r.append(yhat.copy())
            CE_r.append(ce.copy())
        RMS.append(Rmsep_r)
        YHAT.append(Yhat_r)
        CEE.append(CE_r)
    RMS = asarray(RMS)
    YHAT = asarray(YHAT)
    CEE = asarray(CEE)
# Significance Hotellings T
# NOTE(review): this jackknife-based T^2 path runs only when the
# permutation q-values below are DISABLED -- presumably an either/or
# choice of significance method; confirm the inverted condition is
# intentional.
calc_qvals = True
if not calc_qvals:
    Wx, Wz = pyblm.crossvalidation.lpls_jk(Xr, Y, Z, aopt, center_axis=center_axis, xz_alpha=xz_alpha, nsets=nsets)
    # scale loadings by the corresponding score norms before T^2
    Ws = W*apply_along_axis(norm, 0, T)
    Ws = Ws[:,:aopt]
    cal_tsq_x = pyblm.statistics.hotelling(Wx, Ws[:,:aopt], alpha=w_alpha)
    Ls = L*apply_along_axis(norm, 0, K)
    cal_tsq_z = pyblm.statistics.hotelling(Wz, Ls[:,:aopt], alpha=0.01)
# qvals
if calc_qvals:
    # permutation-based T^2 statistics and FDR q-values for genes (x)
    # and GO terms (z)
    cal_tsq_z, pert_tsq_z, cal_tsq_x, pert_tsq_x = pyblm.lpls_qvals(Xr, Y, Z, aopt=aopt, zx_alpha=xz_alpha, n_iter=n_iter, nsets=nsets)
    qvalz = pyblm.statistics._fdr(cal_tsq_z, pert_tsq_z, median)
    qvalx = pyblm.statistics._fdr(cal_tsq_x, pert_tsq_x, median)

# p-values, set-enrichment analysis
# NOTE(review): qvalx/qvalz only exist when calc_qvals is True (it is
# hard-coded True above) -- this section would NameError otherwise.
active_genes_ids = where(qvalx < qval_cutoff)[0]
active_genes = [name for i,name in enumerate(gene_ids) if i in active_genes_ids]
active_universe = gene_ids
gsea_result, gsea_params= rpy_go.gene_GO_hypergeo_test(genelist=active_genes,universe=active_universe,chip=chip,pval_cutoff=1.0,cond=False,test_direction="over")
active_goterms_ids = where(qvalz < qval_cutoff)[0]
active_goterms = [name for i,name in enumerate(terms) if i in active_goterms_ids]
# GO term id -> hypergeometric p-value
gsea_t2p = dict(zip(gsea_result['GOBPID'], gsea_result['Pvalue']))
#### PLOTS ####
from pylab import *
from scipy import where
# GO DAG restricted to the active ontology terms
dg = plots_lpls.dag(terms, "bp")
pos = None

if calc_qvals:
    figure(300)
    subplot(2,1,1)
    # top panel: DAG nodes coloured by the calibrated T^2 statistic
    pos = plots_lpls.plot_dag(dg, node_color=cal_tsq_z, pos=pos, nodelist=terms)
    ax = gca()
    colorbar(ax.collections[0])
    # BUG FIX: the two xlabel strings were swapped -- this panel is
    # coloured by cal_tsq_z (T2), the lower one by qvalz (q values).
    xlabel('T2 values')
    xticks([])
    yticks([])
    subplot(2,1,2)
    # bottom panel: DAG nodes coloured by the FDR q-values
    pos = plot_dag(dg, node_color=qvalz, pos=pos, nodelist=terms)
    ax = gca()
    colorbar(ax.collections[0])
    xlabel('q values')
else:
    figure(300)
    subplot(2,1,1)
    pos = plots_lpls.plot_dag(dg, pos=pos, nodelist=terms)
# Grouped bar chart of cross-validated RMSEP per component, one bar
# colour per y-variable.
if calc_rmsep:
    figure(190) #rmsep
    bar_col = 'rgbcmyk'*2          # colour cycle, one per y-variable
    m = Y.shape[1]
    bar_w = 1./(m + 2.)            # width so all y-variables fit per component
    for a in range(m):
        bar(arange(a_max)+a*bar_w+.1, rmsep[a,:], width=bar_w, color=bar_col[a])
    ylim([rmsep.min()-.05, rmsep.max()+.05])
    title('RMSEP: Y(%s)' %DY.get_name())
    #figure(2)
    #for a in range(m):
    #    bar(arange(a_max)+a*bar_w+.1, class_error[:,a], width=bar_w, color=bar_col[a])
    #ylim([class_error.min()-.05, class_error.max()+.05])
    #title('Classification accuracy')
# Combined correlation-loadings plot: GO terms (Rz, sized/coloured by
# T^2), y-variables (Ry, green squares) and genes (Rx, small grey dots).
figure(5) # Hyploid correlations
pc1 = 2     # component on the x-axis
pc2 = 3     # component on the y-axis
tsqz = cal_tsq_z
tsqx = cal_tsq_x
tsqz_s = 550*tsqz/tsqz.max()    # marker sizes scaled so the max is 550
# map GO ids to human-readable descriptions for labelling
td = rpy_go.goterm2desc(terms)
tlabels = [td[i] for i in terms]
#keep = tsqz.argsort()[:100]
#k_Rz = Rz[keep,:]
#k_tsqz_s = tsqz_s[keep]
#k_tsq = tsqz[keep]
#k_tlabels = [name for i,name in enumerate(tlabels) if i in keep]
plot_corrloads(Rz, pc1=pc1, pc2=pc2, s=tsqz_s, c=tsqz, zorder=6, expvar=evz, ax=None,alpha=.9,labels=None)
#plot_corrloads(k_Rz, pc1=0, pc2=1, s=k_tsqz_s, c=k_tsqz, zorder=5, expvar=evz, ax=None,alpha=.5,labels=None)
ax = gca()
ylabels = DYg.get_identifiers(DYg.get_dim_name()[1], sorted=True)
#ylabels = DYr.get_identifiers(DYr.get_dim_name()[1], sorted=True)
#blabels = yglabels[:]
#blabels.append(ylabels[0])
plot_corrloads(Ry, pc1=pc1, pc2=pc2, s=350, c='g', marker='s', zorder=7, expvar=evy, ax=ax,labels=ylabels,alpha=1.0, drawback=False)
plot_corrloads(Rx, pc1=pc1, pc2=pc2, s=3, c=(.6,.6,.6), alpha=1, zorder=4, expvar=evx, ax=ax, drawback=False, faceted=False)
# Overview figure: separate X/Y/Z correlation-loading panels for the
# first two components, plus explained variance per component.
figure(4)
subplot(221)
ax = gca()
plot_corrloads(Rx, pc1=0, pc2=1, s=tsqx/2.0, c='b', zorder=5, expvar=evx, ax=ax)
# title('X correlation')
subplot(222)
ax = gca()
plot_corrloads(Ry, pc1=0, pc2=1, s=250, c='g', zorder=5, expvar=evy, ax=ax)
#title('Y correlation')
subplot(223)
ax = gca()
plot_corrloads(Rz, pc1=0, pc2=1, s=tsqz/10.0, c='r', zorder=5, expvar=evz, ax=ax)
#title('Z correlation')
subplot(224)
plot(arange(len(evx)), evx, 'b', label='X', linewidth=2)
plot(evy, 'g', label='Y', linewidth=2)
plot(evz, 'r', label='Z', linewidth=2)
legend(loc=2)
ylabel('Explained variance')
xlabel('Component')
xticks((arange(len(evx))), [str(int(i+1)) for i in arange(len(evx))])
show()
# Permutation check of the alpha curve: true mean error (green) vs. the
# mean +/- std band of the permuted-Z error curves, at component aopt.
figure(19)
#subplot(1,2,1)
# RMS : (n_rand_iter, n_alpha, nvarY, a_max)
# Rmsep : (n_alpha, nvarY, a_max)
rms = RMS[:,:,:,aopt] # looking at solution at aopt
m_rms = rms.mean(2) # mean over all y-variables
mm_rms = m_rms.mean(0) # mean over iterations
std_mrms = m_rms.std(0) # standard deviation over iterations
rms_t = Rmsep[:,:,aopt]
m_rms_t = rms_t.mean(1)
xax = arange(mm_rms.shape[0])
# shaded band limits: permuted mean +/- one std
std2_lim_down = mm_rms - 1.*std_mrms
std2_lim_up = mm_rms + 1.*std_mrms
xx = r_[xax, xax[::-1]]
yy = r_[std2_lim_down, std2_lim_up[::-1]]
fill(xx, yy, fc='.9')
plot(mm_rms, '--r', lw=1.5, label='Perm. mean')
plot(std2_lim_down, 'b--')
plot(std2_lim_up, 'b--', label='Perm. 2*std')
plot(m_rms_t, 'g', lw=1.5, label='True')
#c_ylim = ylim()
#ylim(c_ylim[0], c_ylim[1]-1)
alpha_ind = linspace(0, Alpha.shape[0]-1, 11)
xticks(alpha_ind, ['%.1f' %a for a in arange(0,1.01, .1)])
xlabel(r'$\alpha$')
ylabel('mean error')
leg = legend(loc=2)
# delete fill from legend
del leg.texts[-1]
del leg.legendHandles[-1]
# delete one of the std legends
del leg.texts[1]
del leg.legendHandles[1]
klass = True
if klass:
    # Same permutation-vs-true comparison as figure 19, but for the
    # classification error (CEE/CE) instead of rmsep.
    figure(20)
    # subplot(1,2,1)
    # RMS : (n_rand_iter, n_alpha, nvarY, a_max)
    # Rmsep : (n_alpha, nvarY, a_max)
    cee = CEE[:,:,aopt,:] # looking at solution at aopt
    m_cee = cee.mean(-1) # mean over all y-variables
    mm_cee = m_cee.mean(0) # mean over iterations
    std_cee = m_cee.std(0) # standard deviation over iterations
    CE = asarray(CE)
    cee_t = CE[:,:,aopt]
    m_cee_t = cee_t.mean(1)
    xax = arange(mm_cee.shape[0])
    std2_lim_down = mm_cee - 2*std_cee
    std2_lim_up = mm_cee + 2*std_cee
    xx = r_[xax, xax[::-1]]
    yy = r_[std2_lim_down, std2_lim_up[::-1]]
    fill(xx, yy, fc='.9')
    plot(mm_cee, '--r', lw=1.5)
    plot(std2_lim_down, 'b--')
    plot(std2_lim_up, 'b--')
    plot(m_cee_t, 'g', lw=1.5)
    c_ylim = ylim()
    # BUG FIX: was `ylim = ylim(c_ylim[0], .2)`, which rebinds the name
    # `ylim` to the call's return value and shadows pylab's ylim function
    # for the rest of the session.
    ylim(c_ylim[0], .2)
    xticks(xax, [str(a)[:3] for a in Alpha])
    xlabel(r'$\alpha$')
    ylabel('mean error')