2007-09-20 16:10:40 +00:00
parent d9e5398865
commit 7e9a0882f1
4 changed files with 168 additions and 59 deletions

@@ -19,42 +19,49 @@ if use_data=='smoker':
Y = DY.asarray().astype('d')
gene_ids = DX.get_identifiers('gene_ids', sorted=True)
elif use_data=='scherf':
DX = dataset.read_ftsv(open("../../data/scherf/Scherf.ftsv"))
DY = dataset.read_ftsv(open("../../data/scherf/Yd.ftsv"))
DX = dataset.read_ftsv(open("../../data/scherf/scherfX.ftsv"))
DY = dataset.read_ftsv(open("../../data/scherf/scherfY.ftsv"))
Y = DY.asarray().astype('d')
gene_ids = DX.get_identifiers('gene_ids', sorted=True)
elif use_data=='staunton':
pass
elif use_data=='uma':
DX = dataset.read_ftsv(open("../../data/uma/X133.ftsv"))
DY = dataset.read_ftsv(open("../../data/uma/Yg133.ftsv"))
DYg = dataset.read_ftsv(open("../../data/uma/Yg133.ftsv"))
DY = dataset.read_ftsv(open("../../data/uma/Yd.ftsv"))
Y = DY.asarray().astype('d')
gene_ids = DX.get_identifiers('gene_ids', sorted=True)
# Use only subset defined on GO
ontology = 'BP'
print "\n\nFiltering genes by Go terms "
# use subset with defined GO-terms
gene2goterms = rpy_go.goterms_from_gene(gene_ids)
all_terms = set()
for t in gene2goterms.values():
all_terms.update(t)
terms = list(all_terms)
print "\nNumber of go-terms: %s" %len(terms)
# update genelist
gene_ids = gene2goterms.keys()
print "\nNumber of genes: %s" %len(gene_ids)
X = DX.asarray()
index = DX.get_indices('gene_ids', gene_ids)
X = X[:,index]
1/0   # deliberate halt: raises ZeroDivisionError so the run stops here during debugging
# Use only subset defined on GO
ontology = 'BP'
print "\n\nFiltering genes by Go terms "
# use subset based on SAM or IQR
subset = 'm'
subset = 'not'
if subset=='sam':
# select subset genes by SAM
rpy.r.library("siggenes")
rpy.r.library("qvalue")
data = DX.asarray().T
# data = data[:100,:]
rpy.r.assign("data", data)
cl = dot(DY.asarray(), diag([1,2,3])).sum(1)
rpy.r.assign("data", X.T)
cl = dot(DY.asarray(), diag(arange(Y.shape[1])+1)).sum(1)
rpy.r.assign("cl", cl)
rpy.r.assign("B", 20)
# Perform a SAM analysis.
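
Side note on the cl construction above: multiplying the dummy-coded class matrix by diag(arange(Y.shape[1])+1) and summing each row collapses it to integer class labels 1..k, which is what siggenes expects. A minimal NumPy check of that trick (toy data, not from this commit):

import numpy
Y_toy = numpy.array([[1, 0, 0],
                     [0, 1, 0],
                     [0, 0, 1],
                     [0, 1, 0]], dtype='d')
cl_toy = numpy.dot(Y_toy, numpy.diag(numpy.arange(Y_toy.shape[1]) + 1)).sum(1)
# cl_toy -> array([ 1.,  2.,  3.,  2.])
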
@@ -65,13 +72,21 @@ if subset=='sam':
qq = rpy.r('qobj<-qvalue(sam.out@p.value)')
qvals = asarray(qq['qvalues'])
# cut off
cutoff = 0.001
cutoff = 0.01
index = where(qvals<cutoff)[0]
# Subset data
X = DX.asarray()
#Xr = X[:,index]
gene_ids = DX.get_identifiers('gene_ids', index)
X = X[:,index]
gene_ids = [gid for i, gid in enumerate(gene_ids) if i in index]
print "\nWorking on subset with %s genes " %len(gene_ids)
# update valid go-terms
gene2goterms = rpy_go.goterms_from_gene(gene_ids)
all_terms = set()
for t in gene2goterms.values():
all_terms.update(t)
terms = list(all_terms)
print "\nNumber of go-terms: %s" %len(terms)
else:
# noimp (smoker data is prefiltered)
pass
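
The map-genes-to-GO-terms-and-collect-the-term-universe pattern now appears twice in this script and could be factored into a small helper. A sketch, assuming rpy_go.goterms_from_gene returns a dict from gene id to a list of GO term ids (which is how it is used above):

def go_term_universe(gene_ids):
    """Return (genes that have GO annotation, union of their GO term ids)."""
    gene2goterms = rpy_go.goterms_from_gene(gene_ids)
    all_terms = set()
    for terms in gene2goterms.values():
        all_terms.update(terms)
    return gene2goterms.keys(), list(all_terms)

# usage: gene_ids, terms = go_term_universe(gene_ids)
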
@@ -97,9 +112,9 @@ Xr = DX.asarray()[:,newind]
######## LPLSR ########
print "LPLSR ..."
a_max = 5
a_max = 10
aopt = 3
xz_alpha = .5
xz_alpha = .6
w_alpha = .1
mean_ctr = [2, 0, 2]
@@ -108,9 +123,9 @@ sdtz = False
if sdtz:
Z = Z/Z.std(0)
T, W, P, Q, U, L, K, B, b0, evx, evy, evz = nipals_lpls(Xr,Y,Z, a_max,
alpha=xz_alpha,
mean_ctr=mean_ctr)
T, W, P, Q, U, L, K, B, b0, evx, evy, evz,mnx,mny,mnz = nipals_lpls(Xr,Y,Z, a_max,
alpha=xz_alpha,
mean_ctr=mean_ctr)
# Correlation loadings
dx,Rx,rssx = correlation_loadings(Xr, T, P)
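
correlation_loadings is taken here to return, for every variable, its Pearson correlation with each score vector (plus data and residual sums of squares, not reproduced below). A rough NumPy stand-in for that idea, not the library routine:

def corr_loadings_sketch(X, T):
    """Correlation of each column of X with each score vector in T."""
    import numpy
    Xc = X - X.mean(0)
    Tc = T - T.mean(0)
    denom = numpy.outer(Xc.std(0), Tc.std(0))
    denom[denom == 0] = 1.0   # guard against constant columns
    return numpy.dot(Xc.T, Tc)/(X.shape[0]*denom)
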
@@ -118,11 +133,13 @@ dx,Ry,rssy = correlation_loadings(Y, T, Q)
cadz,Rz,rssz = correlation_loadings(Z.T, W, L)
# Prediction error
rmsep , yhat, class_error = cv_lpls(Xr, Y, Z, a_max, alpha=xz_alpha,mean_ctr=mean_ctr)
alpha_check=False
alpha_check=True
if alpha_check:
Alpha = arange(0.01, 1, .1)
Rmsep,Yhat, CE = [],[],[]
for a in Alpha:
print "alpha %f" %a
rmsep, yhat, ce = cv_lpls(Xr, Y, Z, a_max, alpha=a, mean_ctr=mean_ctr)  # evaluate the current alpha from the sweep
Rmsep.append(rmsep)
Yhat.append(yhat)
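
Once the sweep results are stacked (as done for Yhat and CE in the next hunk), a reasonable way to pick the X/Z blend is the alpha with the lowest mean cross-validated RMSEP. A sketch under that assumption (hypothetical post-sweep step; assumes every rmsep from cv_lpls has the same shape so Rmsep stacks cleanly):

Rmsep_arr = asarray(Rmsep)
best_alpha = Alpha[Rmsep_arr.reshape(len(Alpha), -1).mean(1).argmin()]
print "alpha with lowest mean RMSEP: %.2f" % best_alpha
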
@@ -131,11 +148,12 @@ if alpha_check:
Yhat = asarray(Yhat)
CE = asarray(CE)
# Significance Hotellings T
Wx, Wz, Wy, = jk_lpls(Xr, Y, Z, aopt, mean_ctr=mean_ctr,alpha=w_alpha)
Wx, Wz, Wy, = jk_lpls(Xr, Y, Z, aopt, mean_ctr=mean_ctr,alpha=xz_alpha)
Ws = W*apply_along_axis(norm, 0, T)
tsqx = cx_stats.hotelling(Wx, Ws[:,:aopt])
tsqz = cx_stats.hotelling(Wz, L[:,:aopt])
tsqx = cx_stats.hotelling(Wx, Ws[:,:aopt], alpha=w_alpha)
tsqz = cx_stats.hotelling(Wz, L[:,:aopt], alpha=0)
## plots ##
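
cx_stats.hotelling is assumed here to score each variable with a Hotelling-type T^2 built from the spread of its jackknife (cross-validation segment) loadings around the full-model loading, with the alpha argument read as a stabiliser for the covariance estimate. A rough sketch of that idea, not the library code:

def hotelling_tsq_sketch(Wcv, w_full, alpha=0.01):
    """Per-variable T^2 from jackknife loading estimates.

    Wcv    : (n_segments, n_vars, n_comp) loadings from each CV segment
    w_full : (n_vars, n_comp) loadings from the full model
    alpha  : ridge added to the covariance diagonal (assumed meaning)
    """
    import numpy
    n_seg, n_vars, n_comp = Wcv.shape
    tsq = numpy.zeros(n_vars)
    for j in range(n_vars):
        D = Wcv[:, j, :] - w_full[j, :]       # deviations across segments
        S = numpy.dot(D.T, D)/n_seg           # jackknife covariance
        S = S + alpha*numpy.trace(S)/n_comp*numpy.eye(n_comp)   # stabilise
        tsq[j] = numpy.dot(w_full[j, :], numpy.linalg.solve(S, w_full[j, :]))
    return tsq
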
@@ -156,12 +174,12 @@ title('Classification accuracy')
figure(3) # Hypoid correlations
tsqz_s = 250*tsqz/tsqz.max()
plot_corrloads(Rz, pc1=0, pc2=1, s=tsqz_s, c='b', zorder=5, expvar=evz, ax=None)
plot_corrloads(Rz, pc1=0, pc2=1, s=tsqz_s, c=tsqz, zorder=5, expvar=evz, ax=None,alpha=.5)
ax = gca()
ylabels = DY.get_identifiers('_status', sorted=True)
plot_corrloads(Ry, pc1=0, pc2=1, s=150, c='g', zorder=5, expvar=evy, ax=ax,labels=ylabels)
ylabels = DY.get_identifiers(DY.get_dim_name()[1], sorted=True)
plot_corrloads(Ry, pc1=0, pc2=1, s=150, c='g', zorder=5, expvar=evy, ax=ax,labels=ylabels,alpha=.5)
figure(3)
figure(4)
subplot(221)
ax = gca()
plot_corrloads(Rx, pc1=0, pc2=1, s=tsqx/2.0, c='b', zorder=5, expvar=evx, ax=ax)
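
plot_corrloads is read here as a scatter of two correlation-loading columns inside r=1 and r=0.5 reference circles, with optional point labels. A minimal matplotlib stand-in under that assumption (not the project's own routine):

def plot_corrloads_sketch(R, pc1=0, pc2=1, s=20, c='b', expvar=None,
                          ax=None, labels=None, **kw):
    from pylab import gca
    from matplotlib.patches import Circle
    if ax is None:
        ax = gca()
    ax.scatter(R[:, pc1], R[:, pc2], s=s, c=c, **kw)
    for radius in (1.0, 0.5):   # outer and inner reference circles
        ax.add_patch(Circle((0, 0), radius, fill=False, linestyle='dotted'))
    if labels is not None:
        for (x, y), name in zip(R[:, [pc1, pc2]], labels):
            ax.text(x, y, str(name))
    if expvar is not None:
        ax.set_xlabel("comp. %d (%.1f%%)" % (pc1 + 1, expvar[pc1]))
        ax.set_ylabel("comp. %d (%.1f%%)" % (pc2 + 1, expvar[pc2]))
    ax.set_xlim(-1.05, 1.05)
    ax.set_ylim(-1.05, 1.05)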