"""Module contain algorithms for low-rank models. There is almost no typechecking of any kind here, just focus on speed """ import math from scipy.linalg import svd,inv from scipy import dot,empty,eye,newaxis,zeros,sqrt,diag,\ apply_along_axis,mean,ones,randn,empty_like,outer,r_,c_,\ rand,sum,cumsum,matrix, expand_dims,minimum,where has_sym=True try: from symeig import symeig except: has_sym = False has_sym=False def pca(a, aopt,scale='scores',mode='normal',center_axis=-1): """ Principal Component Analysis. Performs PCA on given matrix and returns results in a dictionary. :Parameters: a : array Data measurement matrix, (samples x variables) aopt : int Number of components to use, aopt<=min(samples, variables) :Returns: results : dict keys -- values, T -- scores, P -- loadings, E -- residuals, lev --leverages, ssq -- sum of squares, expvar -- cumulative explained variance, aopt -- number of components used :OtherParameters: mode : str Amount of info retained, ('fast', 'normal', 'detailed') center_axis : int Center along given axis. If neg.: no centering (-inf,..., matrix modes) :SeeAlso: - pcr : other blm - pls : other blm - lpls : other blm Notes ----- Uses kernel speed-up if m>>n or m<>> import scipy,engines >>> a=scipy.asarray([[1,2,3],[2,4,5]]) >>> dat=engines.pca(a, 2) >>> dat['expvar'] array([0.,99.8561562, 100.]) """ if center_axis>=0: a = a - expand_dims(a.mean(center_axis), center_axis) m, n = a.shape if m>(n+100) or n>(m+100): u, e, v = esvd(a) s = sqrt(e) else: u, s, vt = svd(a, 0) v = vt.T e = s**2 tol = 1e-10 eff_rank = sum(s>s[0]*tol) aopt = minimum(aopt, eff_rank) T = u*s s = s[:aopt] e = e[:aopt] T = T[:,:aopt] P = v[:,:aopt] if scale=='loads': T = T/s P = P*s if mode == 'fast': return {'T':T, 'P':P, 'aopt':aopt} if mode=='detailed': E = empty((aopt, m, n)) ssq = [] lev = [] expvarx = empty((aopt, aopt+1)) for ai in range(aopt): E[ai,:,:] = a - dot(T[:,:ai+1], P[:,:ai+1].T) ssq.append([(E[ai,:,:]**2).sum(0), (E[ai,:,:]**2).sum(1)]) if scale=='loads': lev.append([((s*T)**2).sum(1), (P**2).sum(1)]) else: lev.append([(T**2).sum(1), ((s*P)**2).sum(1)]) expvarx[ai,:] = r_[0, 100*e.cumsum()/e.sum()] else: # residuals E = a - dot(T, P.T) SEP = E**2 ssq = [SEP.sum(0), SEP.sum(1)] # leverages if scale=='loads': lev = [(1./m)+(T**2).sum(1), (1./n)+((P/s)**2).sum(1)] else: lev = [(1./m)+((T/s)**2).sum(1), (1./n)+(P**2).sum(1)] # variances expvarx = r_[0, 100*e.cumsum()/e.sum()] return {'T':T, 'P':P, 'E':E, 'expvarx':expvarx, 'levx':lev, 'ssqx':ssq, 'aopt':aopt} def pcr(a, b, aopt, scale='scores',mode='normal',center_axis=0): """ Principal Component Regression. Performs PCR on given matrix and returns results in a dictionary. :Parameters: a : array Data measurement matrix, (samples x variables) b : array Data response matrix, (samples x responses) aopt : int Number of components to use, aopt<=min(samples, variables) :Returns: results : dict keys -- values, T -- scores, P -- loadings, E -- residuals, levx -- leverages, ssqx -- sum of squares, expvarx -- cumulative explained variance, aopt -- number of components used :OtherParameters: mode : str Amount of info retained, ('fast', 'normal', 'detailed') center_axis : int Center along given axis. If neg.: no centering (-inf,..., matrix modes) :SeeAlso: - pcr : other blm - pls : other blm - lpls : other blm Notes ----- Uses kernel speed-up if m>>n or m<>> import scipy,engines >>> a=scipy.asarray([[1,2,3],[2,4,5]]) >>> dat=engines.pca(a, 2) >>> dat['expvar'] array([0.,99.8561562, 100.]) """ k, l = m_shape(b) if center_axis>=0: b = b - expand_dims(b.mean(center_axis), center_axis) dat = pca(a, aopt=aopt, scale=scale, mode=mode, center_axis=center_axis) T = dat['T'] weights = apply_along_axis(vnorm, 0, T) if scale=='loads': # fixme: check weights Q = dot(b.T, T*weights**2) else: Q = dot(b.T, T/weights**2) if mode=='fast': dat.update({'Q':Q}) return dat if mode=='detailed': F = empty((aopt, k, l)) for i in range(aopt): F[i,:,:] = b - dot(T[:,:i+1], Q[:,:i+1].T) else: F = b - dot(T, Q.T) #fixme: explained variance in Y + Y-var leverages dat.update({'Q':Q, 'F':F}) return dat def pls(a, b, aopt=2, scale='scores', mode='normal', ab=None): """Partial Least Squares Regression. Applies plsr to given matrices and returns results in a dictionary. Fast pls for calibration. Only inefficient for many Y-vars. """ m, n = a.shape if ab!=None: mm, ll = m_shape(ab) else: k, l = m_shape(b) assert(m==mm) assert(l==ll) W = empty((n, aopt)) P = empty((n, aopt)) R = empty((n, aopt)) Q = empty((l, aopt)) T = empty((m, aopt)) B = empty((aopt, n, l)) if ab==None: ab = dot(a.T, b) for i in range(aopt): if ab.shape[1]==1: w = ab.reshape(n, l) else: u, s, vh = svd(dot(ab.T, ab)) w = dot(ab, u[:,:1]) w = w/vnorm(w) r = w.copy() if i>0: # recursive estimate to for j in range(0, i, 1): r = r - dot(P[:,j].T, w)*R[:,j][:,newaxis] t = dot(a, r) tt = vnorm(t)**2 p = dot(a.T, t)/tt q = dot(r.T, ab).T/tt ab = ab - dot(p, q.T)*tt T[:,i] = t.ravel() W[:,i] = w.ravel() P[:,i] = p.ravel() R[:,i] = r.ravel() if mode=='fast' and i==aopt-1: if scale=='loads': tnorm = apply_along_axis(vnorm, 0, T) T = T/tnorm W = W*tnorm return {'T':T, 'W':W} Q[:,i] = q.ravel() B[i] = dot(R[:,:i+1], Q[:,:i+1].T) if mode=='detailed': E = empty((aopt, m, n)) F = empty((aopt, k, l)) for i in range(1, aopt+1, 1): E[i-1] = a - dot(T[:,:i], P[:,:i].T) F[i-1] = b - dot(T[:,:i], Q[:,:i].T) else: E = a - dot(T[:,:aopt], P[:,:aopt].T) F = b - dot(T[:,:aopt], Q[:,:aopt].T) if scale=='loads': tnorm = apply_along_axis(vnorm, 0, T) T = T/tnorm W = W*tnorm Q = Q*tnorm P = P*tnorm return {'B':B, 'Q':Q, 'P':P, 'T':T, 'W':W, 'R':R, 'E':E, 'F':F} def w_simpls(aat, b, aopt): """ Simpls for wide matrices. Fast pls for crossval, used in calc rmsep for wide X There is no P or W. T is normalised """ bb = b.copy() m, m = aat.shape U = empty((m, aopt)) T = empty((m, aopt)) H = empty((m, aopt)) #just like W in simpls PROJ = empty((m, aopt)) #just like R in simpls for i in range(aopt): u, s, vh = svd(dot(dot(b.T, aat), b), full_matrices=0) u = dot(b, u[:,:1]) #y-factor scores U[:,i] = u.ravel() t = dot(aat, u) t = t/vnorm(t) T[:,i] = t.ravel() h = dot(aat, t) #score-weights H[:,i] = h.ravel() PROJ[:,:i+1] = dot(T[:,:i+1], inv(dot(T[:,:i+1].T, H[:,:i+1])) ) if iY :input: X : data matrix (m, n) Y : data matrix (m, l) Z : data matrix (n, o) :output: T : X-scores W : X-weights/Z-weights P : X-loadings Q : Y-loadings U : X-Y relation L : Z-scores K : Z-loads B : Regression coefficients X->Y b0: Regression coefficient intercept evx : X-explained variance evy : Y-explained variance evz : Z-explained variance :Notes: """ if mean_ctr!=None: xctr, yctr, zctr = mean_ctr X, mnX = center(X, xctr) Y, mnY = center(Y, xctr) Z, mnZ = center(Z, zctr) varX = pow(X, 2).sum() varY = pow(Y, 2).sum() varZ = pow(Z, 2).sum() m, n = X.shape k, l = Y.shape u, o = Z.shape # initialize U = empty((k, a_max)) Q = empty((l, a_max)) T = empty((m, a_max)) W = empty((n, a_max)) P = empty((n, a_max)) K = empty((o, a_max)) L = empty((u, a_max)) var_x = empty((a_max,)) var_y = empty((a_max,)) var_z = empty((a_max,)) for a in range(a_max): if verbose: print "\n Working on comp. %s" %a u = Y[:,:1] diff = 1 MAX_ITER = 100 lim = 1e-5 niter = 0 while (diff>lim and niter=n: kernel = dot(data.T, data) if has_sym: if not amax: amax = n pcrange = [n-amax, n] s, v = symeig(kernel, range=pcrange, overwrite=True) s = s[::-1] v = v[:,arange(n, -1, -1)] else: u, s, vt = svd(kernel) v = vt.T u = dot(data, v) for i in xrange(n): s[i] = vnorm(u[:,i]) u[:,i] = u[:,i]/s[i] else: kernel = dot(data, data.T) if has_sym: if not amax: amax = m pcrange = [m-amax, m] s, u = symeig(kernel, range=pcrange, overwrite=True) else: u, s, vt = svd(kernel) v = dot(u.T, data) for i in xrange(m): s[i] = vnorm(v[i,:]) v[i,:] = v[i,:]/s[i] return u, s, v.T def vnorm(x): # assume column arrays (or vectors) return math.sqrt(dot(x.T, x)) def center(a, axis): # 0 = col center, 1 = row center, 2 = double center # -1 = nothing if axis==-1: mn = zeros((a.shape[1],)) elif axis==0: mn = a.mean(0) elif axis==1: mn = a.mean(1)[:,newaxis] elif axis==2: mn = a.mean(0) + a.mean(1)[:,newaxis] - a.mean() else: raise IOError("input error: axis must be in [-1,0,1,2]") return a - mn, mn