import sys,time,cPickle import rpy from pylab import gca, figure, subplot,plot from scipy import * from scipy.linalg import norm from lpls import correlation_loadings import rpy_go sys.path.append("../../fluents") # home of dataset sys.path.append("../../fluents/lib") # home of cx_stats sys.path.append("/home/flatberg/fluents/scripts/lpls") sys.path.append("/home/flatberg/pyblm/") import dataset import cx_stats import pyblm from pyblm.engines import nipals_lpls, pls from pyblm.crossvalidation import lpls_val, lpls_jk from pyblm.statistics import pls_qvals from plots_lpls import plot_corrloads, plot_dag import plots_lpls def iqr(X, axis=0): """Interquartile range filtering.""" def _iqr(c): return stats.scoreatpercentile(c, 75) - stats.scoreatpercentile(c, 25) return apply_along_axis(_iqr, axis, X) # Possible outliers # http://www.pubmedcentral.nih.gov/articlerender.fcgi?tool=pubmed&pubmedid=16817967 sample_outliers = ['OV:NCI_ADR_RES', 'CNS:SF_295', 'CNS:SF_539', 'RE:SN12C', 'LC:NCI_H226', 'LC:NCI_H522', 'PR:PC_3', 'PR:DU_145'] outlier = 'ME:LOXIMVI' # 19 ####### OPTIONS ########### # data chip = "hgu133a" use_data = 'uma' #use_data = 'scherf' #use_data = 'uma' if use_data == 'scherf': data_cached = False use_saved_plsr_result = False subset = 'plsr' small_test = False use_sbg_subset = True # the sandberg nci-Ygroups subset std_y = False std_z = False # go ontology = "bp" min_genes = 5 similarities = ("JiangConrath","Resnik","Lin","CoutoEnriched","CoutoJiangConrath","CoutoResnik","CoutoLin") meth = similarities[2] go_term_sim = "OA" # lpls a_max = 10 aopt = 4 aopt = 2 # doubling-time xz_alpha = .5 w_alpha = .3 center_axis = [2, 0, 2] zorth = True nsets = None qval_cutoff = 0.1 n_iter = 50 alpha_check = True calc_rmsep = True bevel_check = False save_calc = True elif use_data == 'uma': data_cached = False use_saved_plsr_result = False subset = 'iqr' small_test = False use_sbg_subset = True # the sandberg nci-Ygroups subset std_y = False std_z = False # go ontology = "bp" min_genes = 5 similarities = ("JiangConrath","Resnik","Lin","CoutoEnriched","CoutoJiangConrath","CoutoResnik","CoutoLin") meth = similarities[2] go_term_sim = "OA" # lpls a_max = 10 aopt = 5 xz_alpha = .5 w_alpha = .3 center_axis = [2, 0, 2] zorth = True nsets = None qval_cutoff = 0.01 n_iter = 50 alpha_check = True calc_rmsep = True bevel_check = False save_calc = True elif use_data == 'smoker': data_cached = False use_saved_plsr_result = False #subset = 'plsr' subset = 'plsr' small_test = False use_sbg_subset = False # the sandberg nci-Ygroups subset std_y = False std_z = False # go ontology = "bp" min_genes = 5 similarities = ("JiangConrath","Resnik","Lin","CoutoEnriched","CoutoJiangConrath","CoutoResnik","CoutoLin") meth = similarities[2] go_term_sim = "OA" # lpls a_max = 5 aopt = 2 xz_alpha = .5 w_alpha = .3 center_axis = [2, 0, 2] zorth = True nsets = None qval_cutoff = 0.01 n_iter = 50 alpha_check = True calc_rmsep = True bevel_check = False save_calc = True else: raise ValueError print "Using options for : " + use_data ######## DATA ########## if use_data=='smoker': # full smoker data DX = dataset.read_ftsv(open("/home/flatberg/datasets/smokers/full/Smokers.ftsv")) DY = dataset.read_ftsv(open("/home/flatberg/datasets/smokers/full/Yg.ftsv")) DYr = dataset.read_ftsv(open("/home/flatberg/datasets/smokers/full/Ypy.ftsv")) Y = DYr.asarray().astype('d') gene_ids = DX.get_identifiers('gene_ids', sorted=True) sample_ids = DY.get_identifiers('_patient', sorted=True) elif use_data=='scherf': print "hepp" #DX = dataset.read_ftsv(open("../../data/scherf/old_data/scherfX.ftsv")) #DY = dataset.read_ftsv(open("../../data/scherf/old_data/scherfY.ftsv")) DX = dataset.read_ftsv(open("nci60/X5964.ftsv", "r")) DYg = dataset.read_ftsv(open("../../data/uma/Yg133.ftsv")) DYr = dataset.read_ftsv(open("../../data/uma/Yd.ftsv")) Y = DYg.asarray().astype('d') DY = DYg.copy() Yg = Y Yr = DYr.asarray().astype('d') X = DX.asarray() gene_ids = DX.get_identifiers('gene_ids', sorted=True) sample_ids = DY.get_identifiers('cline', sorted=True) elif use_data=='staunton': pass elif use_data=='uma': DX = dataset.read_ftsv(open("/home/flatberg/datasets/uma/X133.ftsv")) DYg = dataset.read_ftsv(open("/home/flatberg/datasets/uma/Yg133.ftsv")) DYr = dataset.read_ftsv(open("/home/flatberg/datasets/uma/Yd.ftsv")) X = DX.asarray() Y = DYg.asarray().astype('d') DY = DYg.copy() Yg = Y Yr = DYr.asarray().astype('d') gene_ids = DX.get_identifiers('gene_ids', sorted=True) sample_ids = DY.get_identifiers('cline', sorted=True) else: print "use_data argument: (%s) not valid" %use_method if use_sbg_subset and use_data in ['uma', 'scherf', 'staunton']: print "Using sbg subset of cancers" Y = Yg Y_old = Y.copy() Yr_old = Yr.copy() X_old = X.copy() keep_samples = ['CN', 'ME', 'LE', 'CO', 'RE'] #keep_samples = ['CN', 'ME', 'LE', 'CO', 'RE'] sample_ids_original = DY.get_identifiers('cline', sorted=True) sample_ids= [i for i in sample_ids if i[:2] in keep_samples] rows_ind = [i for i,name in enumerate(sample_ids_original) if name[:2] in keep_samples] # take out rows in X,Y X = X[rows_ind,:] Y = Y[rows_ind,:] Yr = Yr[rows_ind,:] # identify redundant columns in Y cols_ind = where(Y.sum(0)>1)[0] Y = Y[:, cols_ind] # create new datasets with updated idents cat_ids = [name for i,name in enumerate(DYg.get_identifiers('_cancer', sorted=True)) if i in cols_ind] DX = dataset.Dataset(X, [['cline', sample_ids], ['gene_ids', gene_ids]], name='Dxr') DYg = dataset.CategoryDataset(Y, [['cline', sample_ids], ['_cancer', cat_ids]], name='Dyr') DYr = dataset.Dataset(Yr, [['cline', sample_ids], ['_time', ['doubling_time']]], name='Dyrr') DY_old = DY.copy() DY = DYg print "Now there are %d samples in X" %X.shape[0] # use subset of genes with defined GO-terms ic_all = 2026006.0 # sum of all ic in BP max_ic = -log(1/ic_all) ic_cutoff = -log(min_genes/ic_all)/max_ic print "Information cutoff for min %d genes: %.2f" %(min_genes, ic_cutoff) gene2goterms = rpy_go.goterms_from_gene(gene_ids, ic_cutoff=ic_cutoff) all_terms = set() for t in gene2goterms.values(): all_terms.update(t) terms = list(all_terms) print "\nNumber of go-terms: %s" %len(terms) # update genelist gene_ids = gene2goterms.keys() print "\nNumber of genes: %s" %len(gene_ids) X = DX.asarray() index = DX.get_indices('gene_ids', gene_ids) X = X[:,index] # Use only subset defined on GO ontology = 'BP' print "\n\nFiltering genes by Go terms " # use subset based on SAM,PLSR or (IQR) if subset=='plsr': print "plsr filter on genes" if use_saved_plsr_result: index = cPickle.load(open('plsr_index.pkl')) # Subset data X = X[:,index] gene_ids = [gid for i, gid in enumerate(gene_ids) if i in index] print "\nNumber of genes: %s" %len(gene_ids) print "\nWorking on subset with %s genes " %len(gene_ids) # update valid go-terms gene2goterms = rpy_go.goterms_from_gene(gene_ids, ic_cutoff=ic_cutoff) all_terms = set() for t in gene2goterms.values(): all_terms.update(t) terms = list(all_terms) print "\nNumber of go-terms: %s" %len(terms) # update genelist gene_ids = gene2goterms.keys() else: print "Initial plsr qvals" xcal_tsq_x, xpert_tsq_x = pyblm.pls_qvals(X, Y, aopt=aopt, n_iter=n_iter, center_axis=[0,0], nsets=None) qvals = pyblm.statistics._fdr(xcal_tsq_x, xpert_tsq_x, median) # cut off #sort_index = qvals.argsort() #index = sort_index[:800] #qval_cutoff = qvals[sort_index[500]] print "Using cuf off: %.2f" %qval_cutoff index = where(qvals1)[0] X = X[:,index] gene_ids = [gid for i, gid in enumerate(gene_ids) if i in index] print "\nNumber of genes: %s" %len(gene_ids) print "\nWorking on subset with %s genes " %len(gene_ids) # update valid go-terms gene2goterms = rpy_go.goterms_from_gene(gene_ids, ic_cutoff=ic_cutoff) all_terms = set() for t in gene2goterms.values(): all_terms.update(t) terms = list(all_terms) print "\nNumber of go-terms: %s" %len(terms) # update genelist gene_ids = gene2goterms.keys() else: # noimp (smoker data is prefiltered) print "No prefiltering on data used" pass rpy.r.library("GOSim") # Go-term similarity matrix print "Term-term similarity matrix (method = %s)" %meth print "\nCalculating term-term similarity matrix" if meth=="CoutoEnriched": aa = 0 ba = 0 rpy.r.setEnrichmentFactors(alpha = aa, beta =ba) if not data_cached: rpytmat = rpy.with_mode(rpy.NO_CONVERSION, rpy.r.getTermSim)(terms, method=meth,verbose=False) tmat = rpy.r.assign("haha", rpytmat) print "\n Calculating Z matrix" Z = rpy_go.genego_sim(gene2goterms,gene_ids,terms,rpytmat,go_term_sim=go_term_sim,term_sim=meth) DZ = dataset.Dataset(Z, [['go-terms', terms], ['gene_ids', gene_ids]], name='Dz_'+str(meth)) # update data (X) matrix newind = DX.get_indices('gene_ids', gene_ids) Xr = DX.asarray()[:,newind] DXr = dataset.Dataset(Xr, [['cline', sample_ids], ['gene_ids', gene_ids]], name='Dxr') else: #DXr = dataset.read_ftsv(open('Xr.ftsv', 'r')) newind = DX.get_indices('gene_ids', gene_ids) Xr = DX.asarray()[:,newind] DXr = dataset.Dataset(Xr, [['cline', sample_ids], ['gene_ids', gene_ids]], name='Dxr') DY = dataset.read_ftsv(open('Y.ftsv', 'r')) DZ = dataset.read_ftsv(open('Z.ftsv', 'r')) Xr = DXr.asarray() Y = DY.asarray() Z = DZ.asarray() sample_ids = DX.get_identifiers('cline', sorted=True) # standardize Z? sdtz = False if sdtz: DZ._array = DZ._array/Dz._array.std(0) sdty = False if sdty: DY._array = DY._array/DY._array.std(0) # ##### PLS ONLY, CHECK FOR SIMILARITY BETWEEN W and Z ####### if bevel_check: Xr = DXr.asarray() Y = DY.asarray() from pylab import figure, scatter, xlabel, subplot,xticks,yticks Xrcc = Xr - Xr.mean(0) - Xr.mean(1)[:,newaxis] + Xr.mean() Zcc = Z - Z.mean(0) - Z.mean(1)[:,newaxis] + Z.mean() Yc = Y - Y.mean(0) xy_pls_result = pls(Xrcc, Yc, a_max) xz_pls_result = pls(Xrcc.T, Zcc.T, a_max) # check for linearity between scores of xz-result and W of xy-result Wxy = xy_pls_result['W'] Txz = xz_pls_result['T'] figure() n = 0 for i in range(a_max): w = Wxy[:,i] for j in range(a_max): n += 1 t = Txz[:,j] r2 = stats.corrcoef(w, t)[0,-1] subplot(a_max, a_max, n) scatter(w, t) xticks([]) yticks([]) xlabel('(Wxy(%d), Tzx(%d)), r2: %.1f ' %(i+1,j+1,r2)) # ####### LPLSR ######## if save_calc and not data_cached: print "Saving calculations" import cPickle fh = open("g2go_s.pkl", "w") cPickle.dump(gene2goterms, fh) fh.close() dataset.write_ftsv(open('Xs.ftsv', 'w'), DXr, decimals=7) dataset.write_ftsv(open('Ysg.ftsv', 'w'), DY, decimals=7) dataset.write_ftsv(open('Yspy.ftsv', 'w'), DYr, decimals=7) dataset.write_ftsv(open('Zs.ftsv', 'w'), DZ, decimals=7) def read_calc(): import cPickle fh = open("g2go_s.pkl") gene2goterms = cPickle.load(fh) fh.close() DXr = dataset.read_ftsv('Xu.ftsv') DY = dataset.read_ftsv('Yu.ftsv') DYr = dataset.read_ftsv('Ydu.ftsv') DZ = dataset.read_ftsv('Zu.ftsv') return DXr, DY, DYr, DZ, gene2goterms print "LPLSR ..." lpls_result = nipals_lpls(Xr,Y,Z, a_max,alpha=xz_alpha, center_axis=center_axis, zorth=zorth) globals().update(lpls_result) # Correlation loadings dx,Rx,rssx = correlation_loadings(Xr, T, P) dx,Ry,rssy = correlation_loadings(Y, T, Q) cadz,Rz,rssz = correlation_loadings(Z.T, W, L) # Prediction error if calc_rmsep: rmsep , yhat, class_error = pyblm.crossvalidation.lpls_val(Xr, Y, Z, a_max, alpha=xz_alpha,center_axis=center_axis, nsets=nsets,zorth=zorth) Alpha = arange(0.0, 1.01, .05) if alpha_check: Rmsep,Yhat, CE = [],[],[] for a in Alpha: print "alpha %f" %a rmsep_a , yhat, ce = pyblm.lpls_val(Xr, Y, Z, a_max, alpha=a, center_axis=center_axis,nsets=nsets, zorth=zorth) Rmsep.append(rmsep_a.copy()) Yhat.append(yhat.copy()) CE.append(ce.copy()) Rmsep = asarray(Rmsep) Yhat = asarray(Yhat) #CE = asarray(CE) random_alpha_check = True if random_alpha_check: n_zrand = 100 RMS,YHAT, CEE = [],[],[] zindex = arange(Z.shape[1]) for ii in range(n_zrand): zind_rand = zindex.copy() random.shuffle(zind_rand) Zrand = Z[:,zind_rand] #Alpha = arange(0.0, 1.1, .25) Rmsep_r,Yhat_r, CE_r = [],[],[] for a in Alpha: print "Iter: %d alpha %.2f" %(ii, a) rmsep , yhat, ce = pyblm.lpls_val(Xr, Y, Zrand, a_max, alpha=a,center_axis=center_axis,nsets=nsets, zorth=zorth) Rmsep_r.append(rmsep.copy()) Yhat_r.append(yhat.copy()) CE_r.append(ce.copy()) RMS.append(Rmsep_r) YHAT.append(Yhat_r) CEE.append(CE_r) RMS = asarray(RMS) YHAT = asarray(YHAT) CEE = asarray(CEE) # Significance Hotellings T calc_qvals = True if not calc_qvals: Wx, Wz = pyblm.crossvalidation.lpls_jk(Xr, Y, Z, aopt, center_axis=center_axis, xz_alpha=xz_alpha, nsets=nsets) Ws = W*apply_along_axis(norm, 0, T) Ws = Ws[:,:aopt] cal_tsq_x = pyblm.statistics.hotelling(Wx, Ws[:,:aopt], alpha=w_alpha) Ls = L*apply_along_axis(norm, 0, K) cal_tsq_z = pyblm.statistics.hotelling(Wz, Ls[:,:aopt], alpha=0.01) # qvals if calc_qvals: cal_tsq_z, pert_tsq_z, cal_tsq_x, pert_tsq_x = pyblm.lpls_qvals(Xr, Y, Z, aopt=aopt, zx_alpha=xz_alpha, n_iter=n_iter, nsets=nsets) qvalz = pyblm.statistics._fdr(cal_tsq_z, pert_tsq_z, median) qvalx = pyblm.statistics._fdr(cal_tsq_x, pert_tsq_x, median) # p-values, set-enrichment analysis active_genes_ids = where(qvalx < qval_cutoff)[0] active_genes = [name for i,name in enumerate(gene_ids) if i in active_genes_ids] active_universe = gene_ids gsea_result, gsea_params= rpy_go.gene_GO_hypergeo_test(genelist=active_genes,universe=active_universe,chip=chip,pval_cutoff=1.0,cond=False,test_direction="over") active_goterms_ids = where(qvalz < qval_cutoff)[0] active_goterms = [name for i,name in enumerate(terms) if i in active_goterms_ids] gsea_t2p = dict(zip(gsea_result['GOBPID'], gsea_result['Pvalue'])) #### PLOTS #### from pylab import * from scipy import where dg = plots_lpls.dag(terms, "bp") pos = None if calc_qvals: figure(300) subplot(2,1,1) pos = plots_lpls.plot_dag(dg, node_color=cal_tsq_z, pos=pos, nodelist=terms) ax = gca() colorbar(ax.collections[0]) xlabel('q values') xticks([]) yticks([]) subplot(2,1,2) pos = plot_dag(dg, node_color=qvalz, pos=pos, nodelist=terms) ax = gca() colorbar(ax.collections[0]) xlabel('T2 values') else: figure(300) subplot(2,1,1) pos = plots_lpls.plot_dag(dg, pos=pos, nodelist=terms) if calc_rmsep: figure(190) #rmsep bar_col = 'rgbcmyk'*2 m = Y.shape[1] bar_w = 1./(m + 2.) for a in range(m): bar(arange(a_max)+a*bar_w+.1, rmsep[a,:], width=bar_w, color=bar_col[a]) ylim([rmsep.min()-.05, rmsep.max()+.05]) title('RMSEP: Y(%s)' %DY.get_name()) #figure(2) #for a in range(m): # bar(arange(a_max)+a*bar_w+.1, class_error[:,a], width=bar_w, color=bar_col[a]) #ylim([class_error.min()-.05, class_error.max()+.05]) #title('Classification accuracy') figure(5) # Hyploid correlations pc1 = 2 pc2 = 3 tsqz = cal_tsq_z tsqx = cal_tsq_x tsqz_s = 550*tsqz/tsqz.max() td = rpy_go.goterm2desc(terms) tlabels = [td[i] for i in terms] #keep = tsqz.argsort()[:100] #k_Rz = Rz[keep,:] #k_tsqz_s = tsqz_s[keep] #k_tsq = tsqz[keep] #k_tlabels = [name for i,name in enumerate(tlabels) if i in keep] plot_corrloads(Rz, pc1=pc1, pc2=pc2, s=tsqz_s, c=tsqz, zorder=6, expvar=evz, ax=None,alpha=.9,labels=None) #plot_corrloads(k_Rz, pc1=0, pc2=1, s=k_tsqz_s, c=k_tsqz, zorder=5, expvar=evz, ax=None,alpha=.5,labels=None) ax = gca() ylabels = DYg.get_identifiers(DYg.get_dim_name()[1], sorted=True) #ylabels = DYr.get_identifiers(DYr.get_dim_name()[1], sorted=True) #blabels = yglabels[:] #blabels.append(ylabels[0]) plot_corrloads(Ry, pc1=pc1, pc2=pc2, s=350, c='g', marker='s', zorder=7, expvar=evy, ax=ax,labels=ylabels,alpha=1.0, drawback=False) plot_corrloads(Rx, pc1=pc1, pc2=pc2, s=3, c=(.6,.6,.6), alpha=1, zorder=4, expvar=evx, ax=ax, drawback=False, faceted=False) figure(4) subplot(221) ax = gca() plot_corrloads(Rx, pc1=0, pc2=1, s=tsqx/2.0, c='b', zorder=5, expvar=evx, ax=ax) # title('X correlation') subplot(222) ax = gca() plot_corrloads(Ry, pc1=0, pc2=1, s=250, c='g', zorder=5, expvar=evy, ax=ax) #title('Y correlation') subplot(223) ax = gca() plot_corrloads(Rz, pc1=0, pc2=1, s=tsqz/10.0, c='r', zorder=5, expvar=evz, ax=ax) #title('Z correlation') subplot(224) plot(arange(len(evx)), evx, 'b', label='X', linewidth=2) plot(evy, 'g', label='Y', linewidth=2) plot(evz, 'r', label='Z', linewidth=2) legend(loc=2) ylabel('Explained variance') xlabel('Component') xticks((arange(len(evx))), [str(int(i+1)) for i in arange(len(evx))]) show() figure(19) #subplot(1,2,1) # RMS : (n_rand_iter, n_alpha, nvarY, a_max) # Rmsep : (n_alpha, nvarY, a_max) rms = RMS[:,:,:,aopt] # looking at solution at aopt m_rms = rms.mean(2) # mean over all y-variables mm_rms = m_rms.mean(0) # mean over iterations std_mrms = m_rms.std(0) # standard deviation over iterations rms_t = Rmsep[:,:,aopt] m_rms_t = rms_t.mean(1) xax = arange(mm_rms.shape[0]) std2_lim_down = mm_rms - 1.*std_mrms std2_lim_up = mm_rms + 1.*std_mrms xx = r_[xax, xax[::-1]] yy = r_[std2_lim_down, std2_lim_up[::-1]] fill(xx, yy, fc='.9') plot(mm_rms, '--r', lw=1.5, label='Perm. mean') plot(std2_lim_down, 'b--') plot(std2_lim_up, 'b--', label='Perm. 2*std') plot(m_rms_t, 'g', lw=1.5, label='True') #c_ylim = ylim() #ylim(c_ylim[0], c_ylim[1]-1) alpha_ind = linspace(0, Alpha.shape[0]-1, 11) xticks(alpha_ind, ['%.1f' %a for a in arange(0,1.01, .1)]) xlabel(r'$\alpha$') ylabel('mean error') leg = legend(loc=2) # delete fill from legend del leg.texts[-1] del leg.legendHandles[-1] # delete one of the std legends del leg.texts[1] del leg.legendHandles[1] klass = True if klass: figure(20) # subplot(1,2,1) # RMS : (n_rand_iter, n_alpha, nvarY, a_max) # Rmsep : (n_alpha, nvarY, a_max) cee = CEE[:,:,aopt,:] # looking at solution at aopt m_cee = cee.mean(-1) # mean over all y-variables mm_cee = m_cee.mean(0) # mean over iterations std_cee = m_cee.std(0) # standard deviation over iterations CE = asarray(CE) cee_t = CE[:,:,aopt] m_cee_t = cee_t.mean(1) xax = arange(mm_cee.shape[0]) std2_lim_down = mm_cee - 2*std_cee std2_lim_up = mm_cee + 2*std_cee xx = r_[xax, xax[::-1]] yy = r_[std2_lim_down, std2_lim_up[::-1]] fill(xx, yy, fc='.9') plot(mm_cee, '--r', lw=1.5) plot(std2_lim_down, 'b--') plot(std2_lim_up, 'b--') plot(m_cee_t, 'g', lw=1.5) c_ylim = ylim() ylim = ylim(c_ylim[0], .2) xticks(xax, [str(a)[:3] for a in Alpha]) xlabel(r'$\alpha$') ylabel('mean error')