# laydi/fluents/lib/blmfuncs.py
"""This module contains bilinear models(Functions)
"""
import os
import copy
import pygtk
import gtk
import gtk.glade
from fluents import fluents
from fluents.workflow import Function, OptionsDialog, Options
from fluents.dataset import Dataset
from fluents import plots, dataset, workflow, logger
import scipy
from engines import pca, pls, nipals_lpls
from cx_stats import leverage, variances, hotelling
from cx_utils import mat_center
from validation import *
from packer import Packer
import blmplots
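# A minimal usage sketch (an assumption for illustration, not from the original
# docs): given a fluents.dataset.Dataset instance `dx`, a head-less PCA run is
#
#     model = PCA()
#     model._options['amax'] = 5      # cap the number of components
#     out = model.run_o(dx)           # returns a list of Datasets and plots
#
# The GUI entry point is model.run(dx), which shows a PcaOptionsDialog first.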
class NewStyleModel:
def __init__(self, id='johndoe', name='JohnDoe'):
self.id = id
        self.name = name
self.options = Options
self.input_data = []
self.parts = {}
self.io_table = {}
self.datasets = []
self.plots = []
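    # create_dataset expects self.parts to hold the raw arrays and
    # self.io_table to hold the matching identifier lists, both keyed by
    # parameter name; see the checks below.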
def create_dataset(self, param, Dataset=Dataset):
for ds in self.datasets:
if ds.get_name()==param: return ds
if not param in self.parts.keys():
logger.log('notice', 'Parameter: %s not present' %param)
return
        if not param in self.io_table.keys():
            logger.log('notice', 'Parameter: %s not defined in io table' %param)
            return
identifiers = self.io_table.get(param)
data = self.parts.get(param)
ds = Dataset(data, identifiers=identifiers, name=param)
        self.datasets.append(ds)
return ds
def create_plot(self, blmplot):
if blmplot.validate_model(self.parts):
plt = blmplot(self.parts)
self.plots.append(plt)
return plt
def save(self, stype=None):
pass
def load(self):
pass
class Model(Function):
"""Base class of bilinear models.
"""
def __init__(self, id='johndoe', name='JohnDoe'):
Function.__init__(self, id, name)
self.name = name
self._options = None
self._data = {}
self._dataset = {}
self._packers = {}
self.model = {}
def clear(self):
""" Clears model paramters
"""
self.model = {}
self._data = {}
self._packers = {}
class PCA(Model):
def __init__(self, id='pca', name='PCA'):
Model.__init__(self, id, name)
self._options = PcaOptions()
def validation(self, amax, cv_val_sets, pert_val_sets, cv_val_method, pert_val_method):
"""Model validation and estimate of optimal numer of components.
"""
if self._options['calc_cv']:
if cv_val_method == 'random':
sep, aopt = pca_cv_val(self.model['E0'], amax, cv_val_sets)
self.model['sep'] = sep
if self._options['calc_pert']:
if pert_val_method == 'random_diag':
sep, aopt = pca_alter_val(self.model['E0'], amax, pert_val_sets)
self.model['sep'] = sep
if self._options['calc_cv']==False and self._options['calc_pert']==False:
self.model['sep'] = None
aopt = self._options['amax']
if self._options['auto_aopt']:
logger.log("notice", "Auto aopt: " + str(aopt))
self._options['aopt'] = aopt
if aopt==1:
logger.log('notice', 'Aopt at first component!')
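    # confidence() builds jack-knife segments of the loadings (pca_jkP) over
    # n_sets and stores a Hotelling T2 value per variable in self.model['p_tsq'].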
def confidence(self, aopt, n_sets, alpha, p_center,
crot, strict, cov_center):
"""Returns a confidence measure for model parameters.
Based on aopt.
Added model parts: p_tsq
"""
if aopt<2:
aopt = 2
            logger.log('notice', "Hotelling's T2 needs more than 1 component, switching to 2.")
jk_segments = pca_jkP(self.model['E0'], aopt, n_sets)
Pcal = self.model['P'][:,:aopt]
# ensure scaled P
tnorm = scipy.apply_along_axis(norm, 0, self.model['T'][:,:aopt])
Pcal = Pcal*tnorm
tsq = hotelling(jk_segments, Pcal, p_center,
cov_center, alpha, crot, strict)
self.model['p_tsq'] = tsq
def make_model(self, amax, mode, scale):
"""Model on optimal number of components.
"""
dat = pca(self.model['E0'], amax, scale, mode)
self.model.update(dat)
def as_dataset(self, param, dtype='dataset'):
"""Return model parameter as Dataset.
"""
if not param in self.model.keys():
logger.log('notice', 'Parameter: %s not in model' %param)
return
DX = self._dataset['X'] #input dataset
dim_name_0, dim_name_1 = DX.get_dim_name()
# samples
ids_0 = [dim_name_0, DX.get_identifiers(dim_name_0, sorted=True)]
# vars
ids_1 = [dim_name_1, DX.get_identifiers(dim_name_1, sorted=True)]
# components (hidden)
pc_ids = ['_amax', map(str,range(self._options['amax'])) ]
pc_ids_opt = ['_aopt', map(str, range(self._options['aopt'])) ]
zero_dim = ['_doe', ['0']] # null dim, vector (hidden)
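        # map each model parameter to its [row ids, column ids] pair; these
        # identifier lists become the dimensions of the returned Dataset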
match_ids = {'E' : [ids_0, ids_1],
'E0' : [ids_0, ids_1],
'P' : [ids_1, pc_ids],
'T' : [ids_0, pc_ids],
'W' : [ids_1, pc_ids],
'p_tsq' : [ids_1, zero_dim],
'rmsep' : [pc_ids, zero_dim],
'var_leverages' : [ids_1, zero_dim],
'sample_leverages' : [pc_ids, zero_dim],
'exp_var_x' : [pc_ids, zero_dim],
'var_x' : [pc_ids, zero_dim],
'eigvals' : [pc_ids, zero_dim]
}
out = Dataset(self.model[param], match_ids[param], name=param)
return out
def get_out_plots(self, options):
out=[]
for plt in options['out_plots']:
#try:
out.append(plt(self))
#except:
            #    logger.log('debug', 'Plot: %s failed' %str(plt))
return out
def run_o(self, data):
"""Run pca with present options.
"""
self.clear()
options = self._options
self._dataset['X'] = data
self._data['X'] = data.asarray().astype('<f8')
if options['center']:
center = options['center_mth']
self.model['E0'] = center(self._data['X'])
else:
self.model['E0'] = data.asarray()
self.validation(**options.validation_options())
self.model['aopt'] = self._options['aopt']
self.make_model(**options.make_model_options())
if options['calc_conf']:
self.confidence(**options.confidence_options())
out = [self.as_dataset(p) for p in options['out_data']]
for plt in self.get_out_plots(options):
out.append(plt)
return out
def run(self, data):
"""Run Pca with option gui.
"""
dialog = PcaOptionsDialog([data], self._options)
dialog.show_all()
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
# set output data and plots
dialog.set_output()
#run with current data and options
return self.run_o(data)
class PLS(Model):
def __init__(self, id='pls', name='PLS'):
Model.__init__(self, id, name)
self._options = PlsOptions()
def validation(self):
"""Returns rmsep for pls model.
"""
m, n = self.model['E0'].shape
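        # choose the validation engine by shape: more samples than variables
        # (m > n) uses w_pls_cv_val, otherwise pls_val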
if m>n:
val_engine = w_pls_cv_val
else:
val_engine = pls_val
if self._options['calc_cv']==True:
print "Doing RMSEP calc ..."
rmsep, aopt = val_engine(self.model['E0'], self.model['F0'],
self._options['amax'], self._options['n_sets'])
self.model['rmsep'] = rmsep[:,:-1]
self.model['aopt'] = aopt
else:
self.model['rmsep'] = None
self.model['aopt'] = self._options['aopt']
def confidence(self, aopt, n_sets, alpha, p_center,
crot, strict, cov_center ):
"""Returns a confidence measure for model parameters
Supported parameters: W
"""
aopt = self.model['aopt']
if self._options['calc_conf']:
print "Doing Tsq"
jk_segments = pls_jkW(self.model['E0'], self.model['F0'],
aopt, n_sets)
Wcal = self.model['W'][:,:aopt]
# ensure that Wcal is scaled
tnorm = scipy.apply_along_axis(norm, 0, self.model['T'][:,:aopt])
Wcal = Wcal*tnorm
tsq = hotelling(jk_segments, Wcal, p_center,
alpha, crot, strict, cov_center)
self.model['w_tsq'] = tsq
else:
self.model['w_tsq'] = None
def permutation_confidence(self, a, b, aopt, reg,
n_iter, algo, sim_method):
"""Estimates cut off on significant vars by controlling fdr."""
if self._options['calc_qvals']==True:
print "Doing Qvals"
qvals = pls_qvals(a, b,
aopt=None,
alpha=reg,
n_iter=n_iter,
algo='pls',
sim_method=sim_method)
self.model['qval'] = qvals
#self.model['qval_sorted'] = qvals_sorted
else:
self.model['qval'] = None
self.model['qval_sorted'] = None
def make_model(self, a, b, amax, scale, mode, engine):
"""Make model on amax components.
"""
print "Making model"
dat = engine(a, b, amax, scale, mode)
self.model.update(dat)
def as_dataset(self, name, dtype='Dataset'):
"""Return any model parameter as Dataset
No ids matching
"""
if name not in self.model.keys():
return
DX, DY = self._dataset['X'], self._dataset['Y']
dim_name_0, dim_name_1 = DX.get_dim_name()
dim_name_2, dim_name_3 = DY.get_dim_name()
#samples
ids_0 = [dim_name_0, DX.get_identifiers(dim_name_0, sorted=True)]
# x vars
ids_1 = [dim_name_1, DX.get_identifiers(dim_name_1, sorted=True)]
# y vars
ids_3 = [dim_name_3, DY.get_identifiers(dim_name_3, sorted=True)]
# components (hidden)
pc_ids = ['_comp', map(str, range(self._options['amax']))]
pc_ids_opt = ['_comp', map(str, range(self.model['aopt']))]
zero_dim = ['_doe',['0']] # null dim, vector (hidden)
match_ids = {'E' : [ids_0, ids_1],
'P' : [ids_1, pc_ids],
'T' : [ids_0, pc_ids],
'W' : [ids_1, pc_ids],
'R' : [ids_1, pc_ids],
'Q' : [ids_3, pc_ids],
'F' : [ids_0, ids_3],
'B' : [ids_1, ids_3],
'qval' : [ids_1, zero_dim],
'qval_sorted':[ids_1, zero_dim],
'w_tsq' : [ids_1, zero_dim],
'rmsep' : [ids_3, pc_ids],
'CP': [ids_1, pc_ids]
}
array = self.model[name]
M = Dataset(array, identifiers=match_ids[name], name=name)
return M
def get_out_plots(self, options):
out=[]
for plt in options['out_plots']:
#try:
out.append(plt(self))
#except:
# logger.log('debug', 'Plot: %s failed' %plt)
return out
def run_o(self, a, b):
"""Run PLS with present options."""
options = self._options
self._dataset['X'] = a
self._dataset['Y'] = b
self._data['X'] = a.asarray()
self._data['Y'] = b.asarray()
if options['center']:
self.model['E0'] = options['center_mth'](self._data['X'])
self.model['F0'] = options['center_mth'](self._data['Y'])
else:
self.model['E0'] = self._data['X']
self.model['F0'] = self._data['Y']
self.validation()
self.make_model(self.model['E0'], self.model['F0'],
**options.make_model_options())
if options['calc_conf']:
self.confidence(**options.confidence_options())
out = [self.as_dataset(p) for p in options['out_data']]
for plt in self.get_out_plots(options):
out.append(plt)
return out
def run(self, a, b):
"""Run PLS with option gui.
"""
dialog = PlsOptionsDialog([a, b], self._options)
dialog.show_all()
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
# set output data and plots
dialog.set_output()
#run with current data and options
return self.run_o(a, b)
class LPLS(Model):
def __init__(self, id='lpls', name='LPLS'):
Model.__init__(self, id, name)
self._options = LplsOptions()
def validation(self, opt):
"""Returns rmsep for lpls model.
"""
if opt['calc_cv']==True:
val_engine = opt['val_engine']
rmsep, aopt = val_engine(self.model['X'], self.model['Y'],
self.model['Z'], opt['amax'], opt['n_sets'], opt['xz_alpha'])
self.model['rmsep'] = rmsep
self.model['aopt'] = aopt
else:
self.model['rmsep'] = None
self.model['aopt'] = opt['aopt']
def confidence(self, opt):
"""Returns a confidence measure for model parameters
Supported parameters: W
"""
aopt = self.model['aopt']
if opt['calc_conf']:
Wx, Wz = lpls_jk(self._data['X'], self._data['Y'], self._data['Z'], aopt, opt['n_sets'], opt['xz_alpha'])
Wcal = self.model['W'][:,:aopt]
Lcal = self.model['L'][:,:aopt]
# ensure that Wcal is scaled
tnorm = scipy.apply_along_axis(norm, 0, self.model['T'][:,:aopt])
Wcal = Wcal*tnorm
a,b,c,d,e = opt['p_center'], opt['crot'], opt['alpha'], opt['strict'], opt['cov_center']
tsqx = hotelling(Wx, Wcal, a,b,c,d,e)
tsqz = hotelling(Wz, Lcal, a,b,c,d,e)
self.model['tsqx'] = tsqx
self.model['tsqz'] = tsqz
else:
self.model['tsqx'] = None
self.model['tsqz'] = None
def permutation_confidence(self, opt):
"""Estimates cut off on significant vars by controlling fdr.
"""
self.model['qval'] = None
self.model['qval_sorted'] = None
def make_model(self, opt):
"""Make model on amax components.
"""
engine = opt['engine']
dat = engine(self._data['X'], self._data['Y'], self._data['Z'],
opt['amax'], opt['xz_alpha'], opt['center_mth'],
opt['scale'], False)
self.model.update(dat)
def as_dataset(self, name, dtype='Dataset'):
"""Return any model parameter as Dataset
No ids matching
"""
if name not in self.model.keys():
raise ValueError("There is no key named: %s in model" %name)
DX, DY, DZ = self._dataset['X'], self._dataset['Y'], self._dataset['Z']
dim_name_0, dim_name_1 = DX.get_dim_name()
dim_name_2, dim_name_3 = DY.get_dim_name()
dim_name_4, dim_name_5 = DZ.get_dim_name()
#samples
ids_0 = [dim_name_0, DX.get_identifiers(dim_name_0, sorted=True)]
# x vars (genes)
ids_1 = [dim_name_1, DX.get_identifiers(dim_name_1, sorted=True)]
# y vars (sample descriptors)
ids_3 = [dim_name_3, DY.get_identifiers(dim_name_3, sorted=True)]
#z-vars (variable descriptors)
ids_4 = [dim_name_4, DZ.get_identifiers(dim_name_4, sorted=True)]
# components (hidden)
pc_ids = ['_comp', map(str, range(self._options['amax']))]
pc_ids_opt = ['_comp', map(str, range(self.model['aopt']))]
zero_dim = ['_doe',['0']] # null dim, vector (hidden)
match_ids = {'E' : [ids_0, ids_1],
'P' : [ids_1, pc_ids],
'T' : [ids_0, pc_ids],
'U' : [ids_0, pc_ids],
'W' : [ids_1, pc_ids],
'L' : [ids_4, pc_ids],
'Q' : [ids_3, pc_ids],
'F' : [ids_0, ids_3],
'B' : [ids_1, ids_3],
'K' : [ids_1, pc_ids],
'tsqx' : [ids_1, zero_dim],
'tsqz' : [ids_4, zero_dim],
'rmsep' : [ids_3, pc_ids],
'Rx' : [ids_1, pc_ids],
'Rz' : [ids_4, pc_ids],
'evx' : [ids_1, zero_dim],
'evy' : [ids_3, zero_dim],
'evz' : [ids_4, zero_dim]
}
        try:
            array = self.model[name]
            ids = match_ids[name]
        except KeyError:
            raise ValueError("There are no identifiers stored for: %s in model" %name)
M = Dataset(array, identifiers=ids, name=name)
return M
def get_out_plots(self, options):
out=[]
for plt in options['out_plots']:
out.append(plt(self))
return out
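    # Note: unlike PCA/PLS (where run() opens the GUI and run_o() is the
    # head-less path), LPLS uses run() head-less and run_gui() for the dialog.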
def run(self, a, b, c):
"""Run L-PLS with present options."""
options = self._options
self._dataset['X'] = a
self._dataset['Y'] = b
self._dataset['Z'] = c
self._data['X'] = a.asarray()
self._data['Y'] = b.asarray()
self._data['Z'] = c.asarray()
self.validation(options)
self.make_model(options)
if options['calc_conf']:
self.confidence(options)
out = [self.as_dataset(p) for p in options['out_data']]
for plt in self.get_out_plots(options):
out.append(plt)
return out
def run_gui(self, a, b, c):
"""Run LPLS with option gui.
"""
        dialog = LplsOptionsDialog([a, b, c], self._options)
dialog.show_all()
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
# set output data and plots
dialog.set_output()
#run with current data and options
return self.run(a, b, c)
class PcaOptions(Options):
"""Options for Principal Component Analysis.
"""
def __init__(self):
Options.__init__(self)
self._set_default()
def _set_default(self):
opt = {}
opt['algo'] = 'pca'
opt['engine'] = pca
opt['mode'] = 'normal' # how much info to calculate
opt['amax'] = 10
opt['aopt'] = 5
opt['auto_aopt'] = False
opt['center'] = True
opt['center_mth'] = mat_center
opt['scale'] = 'scores'
opt['calc_conf'] = False
opt['n_sets'] = 7
opt['strict'] = True
opt['p_center'] = 'med'
opt['alpha'] = .2
opt['cov_center'] = 'med'
opt['crot'] = True
opt['calc_cv'] = False
opt['calc_pert'] = False
opt['pert_val_method'] = 'random_diag'
opt['cv_val_method'] = 'random'
opt['cv_val_sets'] = 10
opt['pert_val_sets'] = 10
opt['all_data'] = [('T', 'scores', True),
('P', 'loadings', True),
('E','residuals', False),
('p_tsq', 't2', False),
('rmsep', 'RMSEP', False)
]
# (class, name, sensitive, ticked)
opt['all_plots'] = [(blmplots.PcaScorePlot, 'Scores', True),
(blmplots.PcaLoadingPlot, 'Loadings', True),
(blmplots.LineViewXc, 'Line view', True),
(blmplots.PredictionErrorPlot, 'Residual Error', False),
(blmplots.PcaScreePlot, 'Scree', True)
]
opt['out_data'] = ['T','P', 'p_tsq']
opt['out_plots'] = [blmplots.PcaScorePlot,
blmplots.PcaLoadingPlot,
blmplots.LineViewXc,
blmplots.PcaScreePlot]
self.update(opt)
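    # Example (a sketch, not from the original docs): defaults can be adjusted
    # before a head-less run, since Options supports dict-style access, e.g.
    #
    #     opts = PcaOptions()
    #     opts['amax'] = 8
    #     opts['calc_conf'] = True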
def make_model_options(self):
"""Options for make_model method."""
opt_list = ['scale', 'mode', 'amax']
return self._copy_from_list(opt_list)
def confidence_options(self):
"""Options for confidence method."""
opt_list = ['n_sets', 'aopt', 'alpha', 'p_center',
'strict', 'crot', 'cov_center']
return self._copy_from_list(opt_list)
def validation_options(self):
"""Options for pre_validation method."""
opt_list = ['amax', 'cv_val_sets', 'pert_val_sets',
'cv_val_method', 'pert_val_method']
return self._copy_from_list(opt_list)
class PlsOptions(Options):
"""Options for Partial Least Squares Regression.
"""
def __init__(self):
Options.__init__(self)
self._set_default()
def _set_default(self):
opt = {}
opt['algo'] = 'pls'
opt['engine'] = pls
opt['mode'] = 'normal' # how much info to calculate
opt['amax'] = 10
opt['aopt'] = 10
opt['auto_aopt'] = False
opt['center'] = True
opt['center_mth'] = mat_center
opt['scale'] = 'scores'
opt['calc_conf'] = True
opt['n_sets'] = 7
opt['strict'] = True
opt['p_center'] = 'med'
opt['alpha'] = .2
opt['cov_center'] = 'med'
opt['crot'] = True
opt['calc_cv'] = False
opt['cv_val_method'] = 'random'
opt['cv_val_sets'] = opt['n_sets']
opt['all_data'] = [('T', 'scores', True),
('P', 'loadings', True),
('E','residuals', False),
('w_tsq', 't2', True),
('rmsep', 'RMSEP', False)
]
# (class, name, sensitive, ticked)
opt['all_plots'] = [(blmplots.PlsScorePlot, 'Scores', True),
(blmplots.PlsXLoadingPlot, 'X-Loadings', True),
(blmplots.PlsYLoadingPlot, 'Y-Loadings', True),
(blmplots.LineViewXc, 'Line view', True),
(blmplots.PredictionErrorPlot, 'Residual Error', False),
(blmplots.RMSEPPlot, 'RMSEP', False),
(blmplots.PlsCorrelationLoadingPlot, 'Corr. loadings', False)
]
opt['out_data'] = ['T','P', 'w_tsq']
opt['out_plots'] = [blmplots.PlsScorePlot,blmplots.PlsXLoadingPlot,blmplots.PlsYLoadingPlot,blmplots.LineViewXc]
#opt['out_data'] = None
opt['pack'] = True
opt['calc_qvals'] = False
opt['q_pert_method'] = 'shuffle_rows'
opt['q_iter'] = 20
self.update(opt)
def make_model_options(self):
"""Options for make_model method."""
opt_list = ['scale','mode', 'amax', 'engine']
return self._copy_from_list(opt_list)
def confidence_options(self):
"""Options for confidence method."""
opt_list = ['n_sets', 'aopt', 'alpha', 'p_center',
'strict', 'crot', 'cov_center']
return self._copy_from_list(opt_list)
def validation_options(self):
opt_list = ['amax', 'n_sets', 'cv_val_method']
return self._copy_from_list(opt_list)
def permutation_confidence(self):
opt_list = ['q_pert_method', 'q_iter']
return self._copy_from_list(opt_list)
class LplsOptions(Options):
"""Options for L-shaped Partial Least Squares Regression.
"""
def __init__(self):
Options.__init__(self)
self._set_default()
def _set_default(self):
opt = {}
opt['engine'] = nipals_lpls
opt['mode'] = 'normal' # how much info to calculate
opt['amax'] = 10
opt['aopt'] = 3
opt['xz_alpha'] = 0.6
opt['auto_aopt'] = False
opt['center'] = True
opt['center_mth'] = [2, 0, 2]
opt['scale'] = 'scores'
opt['calc_conf'] = False
opt['n_sets'] = 7
opt['strict'] = False
opt['p_center'] = 'med'
opt['alpha'] = .3
opt['cov_center'] = 'med'
opt['crot'] = True
opt['calc_cv'] = False
opt['cv_val_method'] = 'random'
opt['cv_val_sets'] = opt['n_sets']
opt['all_data'] = [('T', 'scores', True),
('Wx', 'X-weights', True),
('Wz', 'Z-weights', True),
('E','residuals', False),
('tsq_x', 't2X', False),
('rmsep', 'RMSEP', False)
]
# (class, name, sensitive, ticked)
opt['all_plots'] = [(blmplots.LplsScorePlot, 'Scores', True),
(blmplots.LplsXLoadingPlot, 'Loadings', True),
(blmplots.LineViewXc, 'Line view', True),
(blmplots.LplsHypoidCorrelationPlot, 'Hypoid corr.', False),
(blmplots.LplsXCorrelationPlot, 'X corr.', True),
(blmplots.LplsZCorrelationPlot, 'Z corr.', True)
]
opt['out_data'] = ['T','P','L','K', 'tsqx', 'tsqz']
opt['out_plots'] = [blmplots.LplsScorePlot,
blmplots.LplsXLoadingPlot,
blmplots.LplsZLoadingPlot,
blmplots.LplsXCorrelationPlot,
blmplots.LplsZCorrelationPlot,
blmplots.LineViewXc,
blmplots.LplsExplainedVariancePlot]
#opt['out_data'] = None
opt['pack'] = False
opt['calc_qvals'] = False
opt['q_pert_method'] = 'shuffle_rows'
opt['q_iter'] = 20
self.update(opt)
def make_model_options(self):
"""Options for make_model method."""
opt_list = ['scale','mode', 'amax', 'engine']
return self._copy_from_list(opt_list)
def confidence_options(self):
"""Options for confidence method."""
opt_list = ['n_sets', 'aopt', 'alpha', 'p_center',
'strict', 'crot', 'cov_center']
return self._copy_from_list(opt_list)
def validation_options(self):
"""Options for pre_validation method."""
opt_list = ['amax', 'n_sets', 'cv_val_method']
return self._copy_from_list(opt_list)
class PcaOptionsDialog(OptionsDialog):
"""Options dialog for Principal Component Analysis.
"""
def __init__(self, data, options, input_names=['X']):
OptionsDialog.__init__(self, data, options, input_names)
glade_file = os.path.join(fluents.DATADIR, 'pca_options.glade')
notebook_name = "vbox1"
page_name = "Options"
self.add_page_from_glade(glade_file, notebook_name, page_name)
# connect signals to handlers
dic = {"on_amax_value_changed" : self.on_amax_changed,
"on_aopt_value_changed" : self.on_aopt_changed,
"auto_aopt_toggled" : self.auto_aopt_toggled,
"center_toggled" : self.center_toggled,
#"on_scale_changed" : self.on_scale_changed,
"on_val_none" : self.val_toggled,
"on_val_cv" : self.cv_toggled,
"on_val_pert" : self.pert_toggled,
"on_cv_method_changed" : self.on_cv_method_changed,
"on_cv_sets_changed" : self.on_cv_sets_changed,
"on_pert_sets_changed" : self.on_pert_sets_changed,
"on_conf_toggled" : self.on_conf_toggled,
"on_subset_loc_changed" : self.on_subset_loc_changed,
"on_cov_loc_changed" : self.on_cov_loc_changed,
"on_alpha_changed" : self.on_alpha_changed,
"on_rot_changed" : self.on_rot_changed
}
self.wTree.signal_autoconnect(dic)
# set/ensure valid default values/ranges
#
amax_sb = self.wTree.get_widget("amax_spinbutton")
max_comp = min(data[0].shape) # max num of components
if self._options['amax']>max_comp:
logger.log('debug', 'amax default too large ... adjusting')
self._options['amax'] = max_comp
amax_sb.get_adjustment().set_all(self._options['amax'], 1, max_comp, 1, 0, 0)
# aopt spin button
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
if self._options['aopt']>self._options['amax']:
            self._options['aopt'] = self._options['amax']
aopt_sb.get_adjustment().set_all(self._options['aopt'], 1, self._options['amax'], 1, 0, 0)
# scale
# scale_cb = self.wTree.get_widget("scale_combobox")
# scale_cb.set_active(0)
# validation frames
if self._options['calc_cv']==False:
cv_frame = self.wTree.get_widget("cv_frame")
cv_frame.set_sensitive(False)
if self._options['calc_pert']==False:
pert_frame = self.wTree.get_widget("pert_frame")
pert_frame.set_sensitive(False)
        self.wTree.get_widget("cv_method").set_active(0)
        self.wTree.get_widget("pert_method").set_active(0)
# confidence
if self._options['calc_conf']==True:
self.wTree.get_widget("subset_expander").set_sensitive(True)
else:
self.wTree.get_widget("subset_expander").set_sensitive(False)
cb = self.wTree.get_widget("subset_loc")
_m = {'med': 0, 'mean': 1, 'full_model': 2}
cb.set_active(_m.get(self._options['p_center']))
cb = self.wTree.get_widget("cov_loc")
_m = {'med': 0, 'mean': 1}
cb.set_active(_m.get(self._options['cov_center']))
hs = self.wTree.get_widget("alpha_scale")
hs.set_value(self._options['alpha'])
def on_amax_changed(self, sb):
logger.log("debug", "amax changed: new value: %s" %sb.get_value_as_int())
amax = sb.get_value_as_int()
# update aopt if needed
if amax<self._options['aopt']:
self._options['aopt'] = amax
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
aopt_sb.get_adjustment().set_all(self._options['aopt'], 1, amax, 1, 0, 0)
self._options['amax'] = sb.get_value_as_int()
def on_aopt_changed(self, sb):
aopt = sb.get_value_as_int()
self._options['aopt'] = aopt
def auto_aopt_toggled(self, tb):
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
if tb.get_active():
self._options['auto_aopt'] = True
aopt_sb.set_sensitive(False)
else:
self._options['auto_aopt'] = False
aopt_sb.set_sensitive(True)
def center_toggled(self, tb):
if tb.get_active():
self._options['center'] = True
else:
logger.log("debug", "centering set to False")
self._options['center'] = False
#def on_scale_changed(self, cb):
# scale = cb.get_active_text()
# if scale=='Scores':
# self._options['scale'] = 'scores'
# elif scale=='Loadings':
# self._options['scale'] = 'loads'
# else:
# raise IOError
def val_toggled(self, tb):
"""Callback for validation: None. """
cv_frame = self.wTree.get_widget("cv_frame")
pert_frame = self.wTree.get_widget("pert_frame")
cv_tb = self.wTree.get_widget("cv_toggle")
p_tb = self.wTree.get_widget("pert_toggle")
if tb.get_active():
self._options['calc_cv'] = False
self._options['calc_pert'] = False
cv_frame.set_sensitive(False)
pert_frame.set_sensitive(False)
cv_tb.set_sensitive(False)
p_tb.set_sensitive(False)
else:
p_tb.set_sensitive(True)
cv_tb.set_sensitive(True)
if p_tb.get_active():
pert_frame.set_sensitive(True)
self._options['calc_pert'] = True
if cv_tb.get_active():
cv_frame.set_sensitive(True)
self._options['calc_cv'] = True
def cv_toggled(self, tb):
cv_frame = self.wTree.get_widget("cv_frame")
if tb.get_active():
cv_frame.set_sensitive(True)
self._options['calc_cv'] = True
else:
cv_frame.set_sensitive(False)
self._options['calc_cv'] = False
def pert_toggled(self, tb):
pert_frame = self.wTree.get_widget("pert_frame")
if tb.get_active():
pert_frame.set_sensitive(True)
self._options['calc_pert'] = True
else:
pert_frame.set_sensitive(False)
self._options['calc_pert'] = False
def on_cv_method_changed(self, cb):
method = cb.get_active_text()
if method == 'Random':
self._options['cv_val_method'] = 'random'
def on_pert_method_changed(self, cb):
method = cb.get_active_text()
if method == 'Random diags':
self._options['pert_val_method'] = 'random_diag'
def on_cv_sets_changed(self, sb):
val = sb.get_value_as_int()
self._options['cv_val_sets'] = val
def on_pert_sets_changed(self, sb):
val = sb.get_value_as_int()
self._options['pert_val_sets'] = val
def on_conf_toggled(self, tb):
if tb.get_active():
self._options['calc_conf'] = False
self.wTree.get_widget("subset_expander").set_sensitive(False)
else:
self._options['calc_conf'] = True
self.wTree.get_widget("subset_expander").set_sensitive(True)
def on_subset_loc_changed(self, cb):
method = cb.get_active_text()
if method=='Full model':
self._options['p_center'] = 'full_model'
elif method=='Median':
self._options['p_center'] = 'med'
elif method=='Mean':
self._options['p_center'] = 'mean'
def on_cov_loc_changed(self, cb):
method = cb.get_active_text()
if method=='Median':
self._options['cov_center'] = 'med'
elif method=='Mean':
self._options['cov_center'] = 'mean'
def on_alpha_changed(self, hs):
self._options['alpha'] = hs.get_value()
def on_rot_changed(self, rg):
proc, strict = rg
if proc.get_active():
self._options['crot'] = True
else:
self._options['crot'] = True
self._options['strict'] = True
class LplsOptionsDialog(OptionsDialog):
"""Options dialog for L-shaped Partial Least squares regression.
"""
def __init__(self, data, options, input_names=['X', 'Y', 'Z']):
OptionsDialog.__init__(self, data, options, input_names)
glade_file = os.path.join(fluents.DATADIR, 'lpls_options.glade')
notebook_name = "vbox1"
page_name = "Options"
self.add_page_from_glade(glade_file, notebook_name, page_name)
# connect signals to handlers
dic = {"on_amax_value_changed" : self.on_amax_changed,
"on_aopt_value_changed" : self.on_aopt_changed,
"auto_aopt_toggled" : self.auto_aopt_toggled,
"center_toggled" : self.center_toggled,
#"on_scale_changed" : self.on_scale_changed,
"on_val_none" : self.val_toggled,
"on_val_cv" : self.cv_toggled,
"on_cv_method_changed" : self.on_cv_method_changed,
"on_cv_sets_changed" : self.on_cv_sets_changed,
"on_conf_toggled" : self.conf_toggled,
"on_subset_loc_changed" : self.on_subset_loc_changed,
"on_cov_loc_changed" : self.on_cov_loc_changed,
"on_alpha_changed" : self.on_alpha_changed,
"on_rot_changed" : self.on_rot_changed,
"on__toggled" : self.conf_toggled,
"on_qval_changed" : self.on_qval_changed,
"on_iter_changed" : self.on_iter_changed
}
self.wTree.signal_autoconnect(dic)
# set/ensure valid default values/ranges
#
amax_sb = self.wTree.get_widget("amax_spinbutton")
max_comp = min(data[0].shape) # max num of components
if self._options['amax']>max_comp:
logger.log('debug', 'amax default too large ... adjusting')
self._options['amax'] = max_comp
amax_sb.get_adjustment().set_all(self._options['amax'], 1, max_comp, 1, 0, 0)
# aopt spin button
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
if self._options['aopt']>self._options['amax']:
            self._options['aopt'] = self._options['amax']
aopt_sb.get_adjustment().set_all(self._options['aopt'], 1, self._options['amax'], 1, 0, 0)
# scale
# scale_cb = self.wTree.get_widget("scale_combobox")
# scale_cb.set_active(0)
# validation frames
if self._options['calc_cv']==False:
cv_frame = self.wTree.get_widget("cv_frame")
cv_frame.set_sensitive(False)
        self.wTree.get_widget("cv_method").set_active(0)
# confidence
if self._options['calc_conf']==True:
self.wTree.get_widget("subset_expander").set_sensitive(True)
else:
self.wTree.get_widget("subset_expander").set_sensitive(False)
cb = self.wTree.get_widget("subset_loc")
_m = {'med': 0, 'mean': 1, 'full_model': 2}
cb.set_active(_m.get(self._options['p_center']))
cb = self.wTree.get_widget("cov_loc")
_m = {'med': 0, 'mean': 1}
cb.set_active(_m.get(self._options['cov_center']))
hs = self.wTree.get_widget("alpha_scale")
hs.set_value(self._options['alpha'])
tb = self.wTree.get_widget("qvals")
tb.set_sensitive(True)
def on_amax_changed(self, sb):
logger.log("debug", "amax changed: new value: %s" %sb.get_value_as_int())
amax = sb.get_value_as_int()
# update aopt if needed
if amax<self._options['aopt']:
self._options['aopt'] = amax
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
aopt_sb.get_adjustment().set_all(self._options['aopt'], 1, amax, 1, 0, 0)
self._options['amax'] = sb.get_value_as_int()
def on_aopt_changed(self, sb):
aopt = sb.get_value_as_int()
self._options['aopt'] = aopt
def auto_aopt_toggled(self, tb):
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
if tb.get_active():
self._options['auto_aopt'] = True
aopt_sb.set_sensitive(False)
else:
self._options['auto_aopt'] = False
aopt_sb.set_sensitive(True)
def center_toggled(self, tb):
if tb.get_active():
self._options['center'] = True
else:
logger.log("debug", "centering set to False")
self._options['center'] = False
#def on_scale_changed(self, cb):
# scale = cb.get_active_text()
# if scale=='Scores':
# self._options['scale'] = 'scores'
# elif scale=='Loadings':
# self._options['scale'] = 'loads'
# else:
# raise IOError
def val_toggled(self, tb):
"""Callback for validation: None. """
cv_frame = self.wTree.get_widget("cv_frame")
cv_tb = self.wTree.get_widget("cv_toggle")
if tb.get_active():
self._options['calc_cv'] = False
cv_frame.set_sensitive(False)
cv_tb.set_sensitive(False)
else:
cv_tb.set_sensitive(True)
if cv_tb.get_active():
cv_frame.set_sensitive(True)
self._options['calc_cv'] = True
def cv_toggled(self, tb):
cv_frame = self.wTree.get_widget("cv_frame")
val_tb = self.wTree.get_widget("val_none_toggle")
if tb.get_active():
cv_frame.set_sensitive(True)
self._options['calc_cv'] = True
else:
cv_frame.set_sensitive(False)
self._options['calc_cv'] = False
def on_cv_method_changed(self, cb):
method = cb.get_active_text()
if method == 'Random':
self._options['cv_val_method'] = 'random'
def on_cv_sets_changed(self, sb):
val = sb.get_value_as_int()
self._options['cv_val_sets'] = val
def conf_toggled(self, tb):
if tb.get_active():
self._options['calc_conf'] = False
self.wTree.get_widget("subset_expander").set_sensitive(False)
else:
self._options['calc_conf'] = True
self.wTree.get_widget("subset_expander").set_sensitive(True)
def on_subset_loc_changed(self, cb):
method = cb.get_active_text()
if method=='Full model':
self._options['p_center'] = 'full_model'
elif method=='Median':
self._options['p_center'] = 'med'
elif method=='Mean':
self._options['p_center'] = 'mean'
def on_cov_loc_changed(self, cb):
method = cb.get_active_text()
if method=='Median':
self._options['cov_center'] = 'med'
elif method=='Mean':
self._options['cov_center'] = 'mean'
def on_alpha_changed(self, hs):
self._options['alpha'] = hs.get_value()
def on_rot_changed(self, rg):
proc, strict = rg
if proc.get_active():
self._options['crot'] = True
else:
self._options['crot'] = True
self._options['strict'] = True
def qval_toggled(self, tb):
if tb.get_active():
            self._options['calc_qvals'] = False
self.wTree.get_widget("qval_method").set_sensitive(False)
self.wTree.get_widget("q_iter").set_sensitive(False)
else:
            self._options['calc_qvals'] = True
self.wTree.get_widget("qval_method").set_sensitive(True)
self.wTree.get_widget("q_iter").set_sensitive(True)
def on_iter_changed(self, sb):
        self._options['q_iter'] = sb.get_value_as_int()
def on_qval_changed(self, cb):
        method = cb.get_active_text()
if method=='Shuffle rows':
self._options['q_pert_method'] = 'shuffle'
class PlsOptionsDialog(OptionsDialog):
"""Options dialog for Partial Least squares regression.
"""
def __init__(self, data, options, input_names=['X', 'Y']):
OptionsDialog.__init__(self, data, options, input_names)
glade_file = os.path.join(fluents.DATADIR, 'pls_options.glade')
notebook_name = "vbox1"
page_name = "Options"
self.add_page_from_glade(glade_file, notebook_name, page_name)
# connect signals to handlers
dic = {"on_amax_value_changed" : self.on_amax_changed,
"on_aopt_value_changed" : self.on_aopt_changed,
"auto_aopt_toggled" : self.auto_aopt_toggled,
"center_toggled" : self.center_toggled,
#"on_scale_changed" : self.on_scale_changed,
"on_val_none" : self.val_toggled,
"on_val_cv" : self.cv_toggled,
"on_cv_method_changed" : self.on_cv_method_changed,
"on_cv_sets_changed" : self.on_cv_sets_changed,
"on_conf_toggled" : self.conf_toggled,
"on_subset_loc_changed" : self.on_subset_loc_changed,
"on_cov_loc_changed" : self.on_cov_loc_changed,
"on_alpha_changed" : self.on_alpha_changed,
"on_rot_changed" : self.on_rot_changed,
"on__toggled" : self.conf_toggled,
"on_qval_changed" : self.on_qval_changed,
"on_iter_changed" : self.on_iter_changed
}
self.wTree.signal_autoconnect(dic)
# set/ensure valid default values/ranges
#
amax_sb = self.wTree.get_widget("amax_spinbutton")
max_comp = min(data[0].shape) # max num of components
if self._options['amax']>max_comp:
logger.log('debug', 'amax default too large ... adjusting')
self._options['amax'] = max_comp
amax_sb.get_adjustment().set_all(self._options['amax'], 1, max_comp, 1, 0, 0)
# aopt spin button
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
if self._options['aopt']>self._options['amax']:
            self._options['aopt'] = self._options['amax']
aopt_sb.get_adjustment().set_all(self._options['aopt'], 1, self._options['amax'], 1, 0, 0)
# scale
# scale_cb = self.wTree.get_widget("scale_combobox")
# scale_cb.set_active(0)
# validation frames
if self._options['calc_cv']==False:
cv_frame = self.wTree.get_widget("cv_frame")
cv_frame.set_sensitive(False)
        self.wTree.get_widget("cv_method").set_active(0)
# confidence
if self._options['calc_conf']==True:
self.wTree.get_widget("subset_expander").set_sensitive(True)
else:
self.wTree.get_widget("subset_expander").set_sensitive(False)
cb = self.wTree.get_widget("subset_loc")
_m = {'med': 0, 'mean': 1, 'full_model': 2}
cb.set_active(_m.get(self._options['p_center']))
cb = self.wTree.get_widget("cov_loc")
_m = {'med': 0, 'mean': 1}
cb.set_active(_m.get(self._options['cov_center']))
hs = self.wTree.get_widget("alpha_scale")
hs.set_value(self._options['alpha'])
tb = self.wTree.get_widget("qvals")
tb.set_sensitive(True)
def on_amax_changed(self, sb):
logger.log("debug", "amax changed: new value: %s" %sb.get_value_as_int())
amax = sb.get_value_as_int()
# update aopt if needed
if amax<self._options['aopt']:
self._options['aopt'] = amax
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
aopt_sb.get_adjustment().set_all(self._options['aopt'], 1, amax, 1, 0, 0)
self._options['amax'] = sb.get_value_as_int()
def on_aopt_changed(self, sb):
aopt = sb.get_value_as_int()
self._options['aopt'] = aopt
def auto_aopt_toggled(self, tb):
aopt_sb = self.wTree.get_widget("aopt_spinbutton")
if tb.get_active():
self._options['auto_aopt'] = True
aopt_sb.set_sensitive(False)
else:
self._options['auto_aopt'] = False
aopt_sb.set_sensitive(True)
def center_toggled(self, tb):
if tb.get_active():
self._options['center'] = True
else:
logger.log("debug", "centering set to False")
self._options['center'] = False
#def on_scale_changed(self, cb):
# scale = cb.get_active_text()
# if scale=='Scores':
# self._options['scale'] = 'scores'
# elif scale=='Loadings':
# self._options['scale'] = 'loads'
# else:
# raise IOError
def val_toggled(self, tb):
"""Callback for validation: None. """
cv_frame = self.wTree.get_widget("cv_frame")
cv_tb = self.wTree.get_widget("cv_toggle")
if tb.get_active():
self._options['calc_cv'] = False
cv_frame.set_sensitive(False)
cv_tb.set_sensitive(False)
else:
cv_tb.set_sensitive(True)
if cv_tb.get_active():
cv_frame.set_sensitive(True)
self._options['calc_cv'] = True
def cv_toggled(self, tb):
cv_frame = self.wTree.get_widget("cv_frame")
val_tb = self.wTree.get_widget("val_none_toggle")
if tb.get_active():
cv_frame.set_sensitive(True)
self._options['calc_cv'] = True
else:
cv_frame.set_sensitive(False)
self._options['calc_cv'] = False
def on_cv_method_changed(self, cb):
method = cb.get_active_text()
if method == 'Random':
self._options['cv_val_method'] = 'random'
def on_cv_sets_changed(self, sb):
val = sb.get_value_as_int()
self._options['cv_val_sets'] = val
def conf_toggled(self, tb):
if tb.get_active():
self._options['calc_conf'] = False
self.wTree.get_widget("subset_expander").set_sensitive(False)
else:
self._options['calc_conf'] = True
self.wTree.get_widget("subset_expander").set_sensitive(True)
def on_subset_loc_changed(self, cb):
method = cb.get_active_text()
if method=='Full model':
self._options['p_center'] = 'full_model'
elif method=='Median':
self._options['p_center'] = 'med'
elif method=='Mean':
self._options['p_center'] = 'mean'
def on_cov_loc_changed(self, cb):
method = cb.get_active_text()
if method=='Median':
self._options['cov_center'] = 'med'
elif method=='Mean':
self._options['cov_center'] = 'mean'
def on_alpha_changed(self, hs):
self._options['alpha'] = hs.get_value()
def on_rot_changed(self, rg):
proc, strict = rg
if proc.get_active():
self._options['crot'] = True
else:
self._options['crot'] = True
self._options['strict'] = True
def qval_toggled(self, tb):
if tb.get_active():
            self._options['calc_qvals'] = False
self.wTree.get_widget("qval_method").set_sensitive(False)
self.wTree.get_widget("q_iter").set_sensitive(False)
else:
            self._options['calc_qvals'] = True
self.wTree.get_widget("qval_method").set_sensitive(True)
self.wTree.get_widget("q_iter").set_sensitive(True)
def on_iter_changed(self, sb):
        self._options['q_iter'] = sb.get_value_as_int()
def on_qval_changed(self, cb):
        method = cb.get_active_text()
if method=='Shuffle rows':
self._options['q_pert_method'] = 'shuffle'