1#!/usr/local/bin/python3.8
2__version__="v2.5 beta11"
3welcome_block="""
4# Multi-Echo ICA, Version %s
5#
6# Kundu, P., Brenowitz, N.D., Voon, V., Worbe, Y., Vertes, P.E., Inati, S.J., Saad, Z.S.,
7# Bandettini, P.A. & Bullmore, E.T. Integrated strategy for improving functional
8# connectivity mapping using multiecho fMRI. PNAS (2013).
9#
10# Kundu, P., Inati, S.J., Evans, J.W., Luh, W.M. & Bandettini, P.A. Differentiating
11#   BOLD and non-BOLD signals in fMRI time series using multi-echo EPI. NeuroImage (2011).
12# http://dx.doi.org/10.1016/j.neuroimage.2011.12.028
13#
14# PROCEDURE 2 : Computes ME-PCA and ME-ICA
15# -Computes T2* map
16# -Computes PCA of concatenated ME data, then computes TE-dependence of PCs
17# -Computes ICA of TE-dependence PCs
18# -Identifies TE-dependent ICs, outputs high-\kappa (BOLD) component
19#    and denoised time series
20# -or- Computes TE-dependence of each component of a general linear model
21#    specified by input (includes MELODIC FastICA mixing matrix)
22""" % (__version__)
23
24import os, sys
25from optparse import OptionParser
26import numpy as np
27
# Try to import a system-wide nibabel first: temporarily drop path[0]
# (the script's own directory) so a bundled copy cannot shadow it.
p0 = sys.path.pop(0)
try:
   import nibabel as nib
   print("-- have system nibabel")
except:
   # NOTE(review): bare except also hides genuine errors inside a system
   # nibabel; on failure the bundled copy is resolved by the re-import below.
   print("-- importing nibabel from meica.libs")
sys.path.insert(0, p0)
# Re-import with path[0] restored: a no-op (cached) if the system import
# succeeded, otherwise this picks up nibabel from the restored local path.
import nibabel as nib
37
38from sys import stdout,argv
39import scipy.stats as stats
40import time
41import datetime
42
# Load the component-selection routines (fitmodels_direct, selcomps) into
# this module's namespace when run as a script.
# NOTE(review): execfile and the cPickle import below are Python 2 only,
# which conflicts with the python3.8 shebang -- confirm intended interpreter.
if __name__=='__main__':
	selfuncfile='%s/select_model.py' % os.path.dirname(argv[0])
	execfile(selfuncfile)
import cPickle as pickle
#import ipdb
import bz2

F_MAX=500   # cap used to flag saturated F-statistics in component tables
Z_MAX = 8   # cap for Z-statistics
52
53def _interpolate(a, b, fraction):
54    """Returns the point at the given fraction between a and b, where
55    'fraction' must be between 0 and 1.
56    """
57    return a + (b - a)*fraction;
58
59
60# for reproducibility           15 May 2018 [rickr]
61def init_random_seeds(seed):
62    import random
63    from scipy import random as numx_rand
64    print '-- initializing random seed to %s' % seed
65    random.seed(seed)
66    numx_rand.seed(seed)
67
68
def scoreatpercentile(a, per, limit=(), interpolation_method='lower'):
    """Score at percentile `per` of `a` (adapted from scipy).

    limit : optional (low, high) pair restricting the values considered.
    interpolation_method : 'fraction', 'lower' (default) or 'higher',
    controlling how a non-integer rank index is resolved.
    """
    values = np.sort(a, axis=0)
    if limit:
        lo, hi = limit[0], limit[1]
        values = values[(lo <= values) & (values <= hi)]

    idx = (values.shape[0] - 1) * per / 100.
    if idx % 1 == 0:
        return values[int(idx)]
    if interpolation_method == 'fraction':
        lower = values[int(idx)]
        upper = values[int(idx) + 1]
        return _interpolate(lower, upper, idx % 1)
    if interpolation_method == 'lower':
        return values[int(np.floor(idx))]
    if interpolation_method == 'higher':
        return values[int(np.ceil(idx))]
    raise ValueError("interpolation_method can only be 'fraction', "
                     "'lower' or 'higher'")
93
def MAD(data, axis=None):
    """Median absolute deviation of `data` about its median along `axis`."""
    med = np.median(data, axis)
    return np.median(np.abs(data - med), axis)
96
def dice(A,B):
	"""Sorensen-Dice coefficient of the nonzero patterns of A and B.

	Returns 2*|A and B| / (|A| + |B|) counted over nonzero entries, or
	0. when both inputs are all-zero (empty denominator).
	"""
	# np.int was removed from numpy >= 1.24; use the builtin int dtype
	denom = np.array(A!=0,dtype=int).sum(0)+(np.array(B!=0,dtype=int).sum(0))
	if denom!=0:
		# where both are nonzero (equivalent to andb([A!=0,B!=0])==2)
		AB_un = np.logical_and(A!=0,B!=0)
		numer = np.array(AB_un,dtype=int).sum(0)
		return 2.*numer/denom
	else:
		return 0.
105
def spatclust(data,mask,csize,thr,header,aff,infile=None,dindex=0,tindex=0):
	"""Spatially cluster `data` (thresholded at `thr`, minimum cluster size
	`csize`) via AFNI 3dmerge; returns a boolean array over `mask` marking
	voxels that survive clustering.

	If `infile` is None the thresholded data are first written out to a
	temporary NIfTI (__clin.nii.gz); otherwise `infile` is clustered as-is.
	"""
	if infile is None:   # was `infile==None`; identity test is the correct form
		data = data.copy()
		data[data<thr] = 0
		niwrite(unmask(data,mask),aff,'__clin.nii.gz',header)
		infile='__clin.nii.gz'
	addopts=""
	# -doall clusters every sub-brick; otherwise select data/threshold indices
	if data is not None and len(np.squeeze(data).shape)>1 and dindex+tindex==0: addopts="-doall"
	else: addopts="-1dindex %s -1tindex %s" % (str(dindex),str(tindex))
	os.system('3dmerge -overwrite %s -dxyz=1  -1clust 1 %i -1thresh %.02f -prefix __clout.nii.gz %s' % (addopts,int(csize),float(thr),infile))
	clustered = fmask(nib.load('__clout.nii.gz').get_data(),mask)!=0
	return clustered
118
def rankvec(vals):
	"""Return 1-based ranks of `vals` (the smallest value gets rank 1)."""
	n = vals.shape[0]
	order = np.argsort(vals)
	ranks = np.zeros(n)
	ranks[order] = np.arange(1, n + 1)
	return ranks
124
def niwrite(data,affine, name , header=None):
	"""Write `data` to the NIfTI file `name` with the given affine.

	If `header` is None, a copy of the module-level `head` (read from the
	input dataset in __main__) is used, with its data shape updated.
	"""
	stdout.write(" + Writing file: %s ...." % name)

	thishead = header
	if thishead is None:   # was `== None`; identity test is the correct form
		# fall back to the global header from the input dataset
		thishead = head.copy()
		thishead.set_data_shape(list(data.shape))

	outni = nib.Nifti1Image(data,affine,header=thishead)
	outni.to_filename(name)
	# print-statement form was Python 2 only
	print('done.')
136
def cat2echos(data,Ne):
	"""
	cat2echos(data,Ne)

	Reshape a z-concatenated multi-echo dataset into explicit echo form.

	Input:
	data shape is (nx,ny,Ne*nz,nt)
	Output shape is (nx,ny,nz,Ne,nt)
	"""
	nx,ny = data.shape[0:2]
	# floor division: plain `/` yields a float under Python 3 and breaks
	# np.reshape (the shebang declares python3.8)
	nz = data.shape[2]//Ne
	if len(data.shape) >3:
		nt = data.shape[3]
	else:
		nt = 1
	return np.reshape(data,(nx,ny,nz,Ne,nt),order='F')
151
def uncat2echos(data,Ne):
	"""
	uncat2echos(data,Ne)

	Flatten the echo axis back into z (inverse of cat2echos).

	Input:
	data shape is (nx,ny,nz,Ne,nt)
	(the original comment said (nx,ny,Ne,nz,nt), but the reshape below is
	only size-consistent when axis 3 has length Ne)
	Output shape is (nx,ny,nz*Ne,nt)
	"""
	# this line previously mixed spaces and tabs -- a TabError in Python 3
	nx,ny = data.shape[0:2]
	nz = data.shape[2]*Ne
	if len(data.shape) >4:
		nt = data.shape[4]
	else:
		nt = 1
	return np.reshape(data,(nx,ny,nz,nt),order='F')
166
def makemask(cdat):
	"""Return a boolean (nx,ny,nz) mask of voxels that are nonzero at
	every timepoint of every echo in `cdat` (shape (nx,ny,nz,Ne,nt))."""
	nx,ny,nz,Ne,nt = cdat.shape

	# np.bool was removed from numpy >= 1.24; use the builtin bool dtype
	mask = np.ones((nx,ny,nz),dtype=bool)

	for i in range(Ne):
		# True only where this echo is nonzero at all timepoints
		tmpmask = (cdat[:,:,:,i,:] != 0).prod(axis=-1,dtype=bool)
		mask = mask & tmpmask

	return mask
178
def fmask(data,mask):
	"""
	fmask(data,mask)

	Flatten the three spatial axes of `data` and keep only in-mask voxels.

	Input:
	data shape is (nx,ny,nz,...)
	mask shape is (nx,ny,nz)

	Output:
	out shape is (Nm,...)
	"""
	dshape = data.shape
	nvox = dshape[0]*dshape[1]*dshape[2]
	# collapse the spatial axes into one, keeping any trailing axes
	newshape = [nvox] + list(dshape[3:])
	flat = np.reshape(data, newshape)
	keep = (mask > 0).ravel()
	return flat.compress(keep, axis=0).squeeze()
205
def unmask (data,mask):
	"""
	unmask (data,mask)

	Scatter masked rows of `data` back into a full spatial volume
	(out-of-mask voxels are zero).

	Input:

	data has shape (Nm,nt)
	mask has shape (nx,ny,nz)

	"""
	keep = (mask != 0).ravel()
	nkeep = keep.sum()
	nx,ny,nz = mask.shape
	nt = data.shape[1] if len(data.shape) > 1 else 1
	out = np.zeros((nx*ny*nz,nt),dtype=data.dtype)
	out[keep,:] = np.reshape(data,(nkeep,nt))
	return np.reshape(out,(nx,ny,nz,nt))
230
def t2smap(catd,mask,tes):
	"""
	t2smap(catd,mask,tes)

	Fit a log-linear mono-exponential decay across echoes to estimate
	T2* and S0 maps, plus a goodness-of-fit map.

	Input:

	catd  has shape (nx,ny,nz,Ne,nt)
	mask  has shape (nx,ny,nz)
	tes   is a 1d numpy array

	Output: (t2s, s0, t2s_fit) volumes, each unmasked to (nx,ny,nz,1)
	"""
	nx,ny,nz,Ne,nt = catd.shape
	N = nx*ny*nz

	echodata = fmask(catd,mask)
	Nm = echodata.shape[0]

	#Do Log Linear fit: log(S) = log(S0) - TE/T2*
	B = np.reshape(np.abs(echodata), (Nm,Ne*nt)).transpose()
	B = np.log(B)
	x = np.array([np.ones(Ne),-tes])
	X = np.tile(x,(1,nt))
	X = np.sort(X)[:,::-1].transpose()

	beta,res,rank,sing = np.linalg.lstsq(X,B)
	t2s = 1/beta[1,:].transpose()        # beta[1] = 1/T2*
	s0  = np.exp(beta[0,:]).transpose()  # beta[0] = log(S0)

	#Goodness of fit (larger = better fit relative to residual)
	alpha = (np.abs(B)**2).sum(axis=0)
	# removed dead alias `blah` that the original bound alongside t2s_fit
	t2s_fit = (alpha - res)/(2*res)

	out = unmask(t2s,mask),unmask(s0,mask),unmask(t2s_fit,mask)

	return out
265
def get_coeffs(data,mask,X,add_const=False):
	"""
	get_coeffs(data,X)

	Least-squares fit of design matrix X to each in-mask voxel time series.

	Input:

	data has shape (nx,ny,nz,nt)
	mask has shape (nx,ny,nz)
	X    has shape (nt,nc)
	add_const : if True, append a constant column for the fit and drop its
	            coefficient from the output

	Output:

	out  has shape (nx,ny,nz,nc)
	"""
	mdata = fmask(data,mask).transpose()

	# the next four lines were space-indented inside this tab-indented
	# function (a TabError under Python 3); indentation normalized
	X=np.atleast_2d(X)
	if X.shape[0]==1: X=X.T
	Xones = np.atleast_2d(np.ones(np.min(mdata.shape))).T
	if add_const: X = np.hstack([X,Xones])

	tmpbetas = np.linalg.lstsq(X,mdata)[0].transpose()
	if add_const: tmpbetas = tmpbetas[:,:-1]
	out = unmask(tmpbetas,mask)

	return out
292
293
def andb(arrs):
	"""Elementwise count of True/nonzero entries across the equally-shaped
	arrays in `arrs`: e.g. andb([a,b])==2 marks where both conditions hold."""
	result = np.zeros(arrs[0].shape)
	# np.int was removed from numpy >= 1.24; use the builtin int dtype
	for aa in arrs: result+=np.array(aa,dtype=int)
	return result
298
def optcom(data,t2s,tes,mask):
	"""
	out = optcom(data,t2s)

	Optimally combine echoes, weighting each echo by TE*exp(-TE/T2*).

	Input:

	data.shape = (nx,ny,nz,Ne,Nt)
	t2s.shape  = (nx,ny,nz)
	tes.shape  = (Ne,)

	Output:

	out.shape = (nx,ny,nz,Nt)
	"""
	nx,ny,nz,Ne,Nt = data.shape

	fdat = fmask(data,mask)
	ft2s = fmask(t2s,mask)

	tes = tes[np.newaxis,:]
	ft2s = ft2s[:,np.newaxis]

	# per-voxel, per-echo weights, replicated across time
	alpha = tes * np.exp(-tes /ft2s)
	alpha = np.tile(alpha[:,:,np.newaxis],(1,1,Nt))

	fout  = np.average(fdat,axis = 1,weights=alpha)
	out = unmask(fout,mask)
	# print-statement form was Python 2 only
	print('Out shape is ', out.shape)
	return out
329
330def getelbow(ks):
331	nc = ks.shape[0]
332	coords = np.array([np.arange(nc),ks])
333	p  = coords - np.tile(np.reshape(coords[:,0],(2,1)),(1,nc))
334	b  = p[:,-1]
335	b_hat = np.reshape(b/np.sqrt((b**2).sum()),(2,1))
336	proj_p_b = p - np.dot(b_hat.T,p)*np.tile(b_hat,(1,nc))
337	d = np.sqrt((proj_p_b**2).sum(axis=0))
338	k_min_ind = d.argmax()
339	k_min  = ks[k_min_ind]
340	return k_min_ind
341
def getfbounds(ne):
	"""F significance thresholds (p<0.05, p<0.025, p<0.01) for `ne` echoes,
	from lookup tables indexed by ne-1 (valid for 3 <= ne <= 11)."""
	F05s = [None,None,18.5,10.1,7.7,6.6,6.0,5.6,5.3,5.1,5.0]
	F025s = [None,None,38.5,17.4,12.2,10,8.8,8.1,7.6,7.2,6.9]
	F01s = [None,None,98.5,34.1,21.2,16.2,13.8,12.2,11.3,10.7,10.]
	i = ne-1
	return F05s[i],F025s[i],F01s[i]
347
def eimask(dd,ees=None):
	"""Build an intensity-based voxel mask per echo.

	dd : (voxels, echoes, time) array. ees : iterable of echo indices
	(defaults to all echoes). Returns a (voxels, len(ees)) 0/1 array
	marking voxels whose temporal mean lies between 0.1% and 5x of the
	98th percentile of that echo's values.
	"""
	if ees is None: ees=range(dd.shape[1])   # was `ees==None`
	imask = np.zeros([dd.shape[0],len(ees)])
	for ee in ees:
		# print-statement form was Python 2 only
		print(ee)
		lthr = 0.001*scoreatpercentile(dd[:,ee,:].flatten(),98)
		hthr = 5*scoreatpercentile(dd[:,ee,:].flatten(),98)
		print(lthr,hthr)
		imask[dd[:,ee,:].mean(1) > lthr,ee]=1
		imask[dd[:,ee,:].mean(1) > hthr,ee]=0
	return imask
359
def tedpca(ste=0):
	"""PCA dimensionality reduction with TE-dependence (Kappa/Rho) based
	component selection.

	ste : -1 -> PCA of the optimally combined data; 0 -> PCA of spatially
	concatenated echoes; otherwise a comma-separated list of echo numbers.
	Reads module globals catd, t2s, tes, mask, kdaw, rdaw, options.
	Returns (nc, dd): retained component count and the reduced, variance-
	normalized data. Caches the decomposition in pcastate.pklbz.
	"""
	nx,ny,nz,ne,nt = catd.shape
	ste = np.array([int(ee) for ee in str(ste).split(',')])
	if len(ste) == 1 and ste[0]==-1:
		print "-Computing PCA of optimally combined multi-echo data"
		OCcatd = optcom(catd,t2s,tes,mask)
		OCmask = makemask(OCcatd[:,:,:,np.newaxis,:])
		d = fmask(OCcatd,OCmask)
		eim = eimask(d[:,np.newaxis,:])
		eim = eim[:,0]==1
		d = d[eim,:]
		#ipdb.set_trace()
	elif len(ste) == 1 and ste[0]==0:
		print "-Computing PCA of spatially concatenated multi-echo data"
		ste = np.arange(ne)
		d = np.float64(fmask(catd,mask))
		eim = eimask(d)==1
		d = d[eim]
	else:
		print "-Computing PCA of TE #%s" % ','.join([str(ee) for ee in ste])
		d = np.float64(np.concatenate([fmask(catd[:,:,:,ee,:],mask)[:,np.newaxis,:] for ee in ste-1],axis=1))
		eim = eimask(d)==1
		eim = np.squeeze(eim)
		d = np.squeeze(d[eim])

	dz = ((d.T-d.T.mean(0))/d.T.std(0)).T #Variance normalize timeseries
	dz = (dz-dz.mean())/dz.std() #Variance normalize everything

	pcastate_fn = 'pcastate.pklbz'

	if not os.path.exists(pcastate_fn):
		##Do PC dimension selection
		#Get eigenvalue cutoff
		u,s,v = np.linalg.svd(dz,full_matrices=0)
		sp = s/s.sum()
		eigelb = sp[getelbow(sp)]

		#ipdb.set_trace()

		# eigenvalue-spacing based floor for retained eigenvalues
		# NOTE(review): shape[0]/2 is float division under Python 3 and
		# would fail as a slice index -- confirm intended interpreter
		spdif = np.abs(sp[1:]-sp[:-1])
		spdifh = spdif[spdif.shape[0]/2:]
		spdmin = spdif.min()
		spdthr = np.mean([spdifh.max(),spdmin])
		spmin = sp[(spdif.shape[0]/2)+(np.arange(spdifh.shape[0])[spdifh>=spdthr][0])+1]
		spcum = []
		spcumv = 0
		for sss in sp:
			spcumv+=sss
			spcum.append(spcumv)
		spcum = np.array(spcum)

		#Compute K and Rho for PCA comps

		#ipdb.set_trace()

		eimum = np.atleast_2d(eim)
		eimum = np.transpose(eimum,np.argsort(np.atleast_2d(eim).shape)[::-1])
		eimum = np.array(np.squeeze(unmask(eimum.prod(1),mask)),dtype=np.bool)
		vTmix = v.T
		vTmixN =((vTmix.T-vTmix.T.mean(0))/vTmix.T.std(0)).T
		#ctb,KRd,betasv,v_T = fitmodels2(catd,v.T,eimum,t2s,tes,mmixN=vTmixN)
		# fitmodels_direct is presumably loaded from select_model.py via the
		# execfile at import time -- not defined in this file
		none,ctb,betasv,v_T = fitmodels_direct(catd,v.T,eimum,t2s,tes,mmixN=vTmixN,full_sel=False)
		ctb = ctb[ctb[:,0].argsort(),:]
		ctb = np.vstack([ctb.T[0:3],sp]).T

		#Save state
		print "Saving PCA"
		pcastate = {'u':u,'s':s,'v':v,'ctb':ctb,'eigelb':eigelb,'spmin':spmin,'spcum':spcum}
		pcastate_f = bz2.BZ2File('pcastate.pklbz','wb')
		pickle.dump(pcastate,pcastate_f)
		pcastate_f.close()

	else:
		print "Loading PCA"
		pcastate_f = bz2.BZ2File('pcastate.pklbz','rb')
		pcastate = pickle.load(pcastate_f)
		# NOTE(review): exec into the local namespace only works under
		# Python 2; under Python 3 these names would stay unbound
		for key,val in pcastate.items(): exec(key + '=val')

	np.savetxt('comp_table_pca.txt',ctb[ctb[:,1].argsort(),:][::-1])
	np.savetxt('mepca_mix.1D',v[ctb[:,1].argsort()[::-1],:].T)

	# Kappa/Rho thresholds: weighted average of the F bounds and half the
	# elbow value, weighted by the augmentation weights kdaw/rdaw
	kappas = ctb[ctb[:,1].argsort(),1]
	rhos = ctb[ctb[:,2].argsort(),2]
	fmin,fmid,fmax = getfbounds(ne)
	kappa_thr = np.average(sorted([fmin,kappas[getelbow(kappas)]/2,fmid]),weights=[kdaw,1,1])
	rho_thr = np.average(sorted([fmin,rhos[getelbow(rhos)]/2,fmid]),weights=[rdaw,1,1])
	if int(kdaw)==-1:
		# low-dimensional ICA mode: take elbows within the F bounds and
		# force stabilized selection
		kappas_lim = kappas[andb([kappas<fmid,kappas>fmin])==2]
		#kappas_lim = kappas[andb([kappas<kappas[getelbow(kappas)],kappas>fmin])==2]
		kappa_thr = kappas_lim[getelbow(kappas_lim)]
		rhos_lim = rhos[andb([rhos<fmid,rhos>fmin])==2]
		rho_thr = rhos_lim[getelbow(rhos_lim)]
		options.stabilize=True
	if int(kdaw)!=-1 and int(rdaw)==-1:
		rhos_lim = rhos[andb([rhos<fmid,rhos>fmin])==2]
		rho_thr = rhos_lim[getelbow(rhos_lim)]
	if options.stabilize:
		# stabilized selection additionally requires cumulative variance
		# < 95% and both F stats above fmin and below the F_MAX cap
		pcscore = (np.array(ctb[:,1]>kappa_thr,dtype=np.int)+np.array(ctb[:,2]>rho_thr,dtype=np.int)+np.array(ctb[:,3]>eigelb,dtype=np.int))*np.array(ctb[:,3]>spmin,dtype=np.int)*np.array(spcum<0.95,dtype=np.int)*np.array(ctb[:,2]>fmin,dtype=np.int)*np.array(ctb[:,1]>fmin,dtype=np.int)*np.array(ctb[:,1]!=F_MAX,dtype=np.int)*np.array(ctb[:,2]!=F_MAX,dtype=np.int)
	else:
		pcscore = (np.array(ctb[:,1]>kappa_thr,dtype=np.int)+np.array(ctb[:,2]>rho_thr,dtype=np.int)+np.array(ctb[:,3]>eigelb,dtype=np.int))*np.array(ctb[:,3]>spmin,dtype=np.int)*np.array(ctb[:,1]!=F_MAX,dtype=np.int)*np.array(ctb[:,2]!=F_MAX,dtype=np.int)
	pcsel = pcscore > 0
	pcrej = np.array(pcscore==0,dtype=np.int)*np.array(ctb[:,3]>spmin,dtype=np.int) > 0

	#ipdb.set_trace()

	# reconstruct the data keeping only the selected principal components
	dd = u.dot(np.diag(s*np.array(pcsel,dtype=np.int))).dot(v)
	nc = s[pcsel].shape[0]
	print pcsel
	print "--Selected %i components. Minimum Kappa=%0.2f Rho=%0.2f" % (nc,kappa_thr,rho_thr)

	dd = ((dd.T-dd.T.mean(0))/dd.T.std(0)).T #Variance normalize timeseries
	dd = (dd-dd.mean())/dd.std() #Variance normalize everything

	return nc,dd
474
def tedica(dd,cost):
	"""
	Input is dimensionally reduced spatially concatenated multi-echo time series dataset from tedpca()
	Output is comptable, mmix, smaps from ICA, and betas from fitting catd to mmix
	"""
	# NOTE(review): the docstring lists several outputs but only mmix is
	# actually returned
	#Do ICA
	climit = float("%s" % options.conv)   # convergence limit from --conv
	#icanode = mdp.nodes.FastICANode(white_comp=nc, white_parm={'svd':True},approach='symm', g=cost, fine_g=options.finalcost, limit=climit, verbose=True)
	# `nc` and `options` are module globals; mdp is imported in __main__
	icanode = mdp.nodes.FastICANode(white_comp=nc,approach='symm', g=cost, fine_g=options.finalcost, primary_limit=climit*100, limit=climit, verbose=True)
	icanode.train(dd)
	smaps = icanode.execute(dd)
	mmix = icanode.get_recmatrix().T
	# z-score each component time course
	mmix = (mmix-mmix.mean(0))/mmix.std(0)
	return mmix
489
def write_split_ts(data,comptable,mmix,suffix=''):
	"""Write high-Kappa, midk-rejected, low-Kappa and denoised time series
	for `data` given mixing matrix `mmix`; returns percent variance
	explained by the fit.

	Uses module globals mask, aff, acc, rej, midk.
	"""
	mdata = fmask(data,mask)
	# betas of mmix fit to the demeaned masked data
	betas = fmask(get_coeffs(unmask((mdata.T-mdata.T.mean(0)).T,mask),mask,mmix),mask)
	dmdata = mdata.T-mdata.T.mean(0)
	varexpl = (1-((dmdata.T-betas.dot(mmix.T))**2.).sum()/(dmdata**2.).sum())*100
	print 'Variance explained: ', varexpl , '%'

	midkts = betas[:,midk].dot(mmix.T[midk,:])
	lowkts = betas[:,rej].dot(mmix.T[rej,:])
	if len(acc)!=0:
		niwrite(unmask(betas[:,acc].dot(mmix.T[acc,:]),mask),aff,'_'.join(['hik_ts',suffix])+'.nii')
	if len(midk)!=0:
		niwrite(unmask(midkts,mask),aff,'_'.join(['midk_ts',suffix])+'.nii')
	if len(rej)!=0:
		niwrite(unmask(lowkts,mask),aff,'_'.join(['lowk_ts',suffix])+'.nii')
	# denoised series = original minus the rejected (low-Kappa + midk) fits
	niwrite(unmask(fmask(data,mask)-lowkts-midkts,mask),aff,'_'.join(['dn_ts',suffix])+'.nii')
	return varexpl
507
def split_ts(data,comptable,mmix):
	"""Split `data` into (high-Kappa signal, residual) using the module
	globals `acc` (accepted components) and `mask`.

	Returns (hikts, data - hikts); when no components are accepted,
	hikts is None and the data are returned unchanged.
	"""
	cbetas = get_coeffs(data-data.mean(-1)[:,:,:,np.newaxis],mask,mmix)
	betas = fmask(cbetas,mask)
	if len(acc)!=0:
		hikts=unmask(betas[:,acc].dot(mmix.T[acc,:]),mask)
		return hikts,data-hikts
	else:
		# bug fix: the original fell through to `data-hikts` with
		# hikts=None, raising a TypeError whenever acc was empty
		return None,data
516
def writefeats(cbetas,comptable,mmix,suffix=''):
	"""Write per-component beta maps (dS) and normalized feature maps
	(dS/S) for the accepted components, scaled by a noise estimate.

	Uses module globals aff, acc, ne, catd, mask, options.
	"""
	#Write signal changes (dS)
	niwrite(cbetas[:,:,:,:],aff,'_'.join(['betas',suffix])+'.nii')
	niwrite(cbetas[:,:,:,acc],aff,'_'.join(['betas_hik',suffix])+'.nii')
	#Compute features (dS/S)
	# bug fix: e2d was only bound when options.e2d was None, so any call
	# with an explicit --denoiseTE hit a NameError on the line below
	if options.e2d is None: e2d=int(np.floor(ne/2))+1
	else: e2d=int(options.e2d)
	edm = fmask(catd[:,:,:,e2d-1,:],mask)
	edms = edm/edm.std(-1)[:,np.newaxis]
	edms[edm<1]=0
	hik,noise = split_ts(unmask(edms,mask),comptable,mmix)
	noise = noise-noise.mean(-1)[:,:,:,np.newaxis]

	zfac = 1./(mmix.shape[0]-len(acc)-1)*(noise**2).sum(-1) #noise scaling
	niwrite(zfac,aff,'zfac.nii')

	cbetam = fmask(cbetas[:,:,:,acc],mask)
	cbetam = (cbetam-cbetam.mean(0))/cbetam.std(0)
	cbetam = cbetam/fmask(zfac,mask)[:,np.newaxis]
	# zero out features where the echo signal itself is negligible
	cbetam[edm.mean(-1)<1,:] = 0

	niwrite(unmask(cbetam,mask),aff,'_'.join(['feats',suffix])+'.nii')
538
def computefeats2(data,mmix,mask,normalize=True):
	"""Convert component fits to Z-valued feature maps.

	Fits `mmix` to the variance-normalized in-mask data, clips the
	resulting coefficients to (-0.999, 0.999) so Fisher's arctanh stays
	finite, and optionally re-normalizes each component map.
	"""
	data = data[mask]
	mu = data.mean(axis=-1)[:,np.newaxis]
	sd = data.std(axis=-1)[:,np.newaxis]
	data_vn = (data-mu)/sd
	data_R = get_coeffs(unmask(data_vn,mask),mask,mmix)[mask]
	data_R = np.clip(data_R, -0.999, 0.999)
	data_Z = np.arctanh(data_R)
	if normalize:
		zmu = data_Z.mean(0)
		zsd = data_Z.std(0)
		#data_Z2 = ((data_Z.T-data_Z.mean(0)[:,np.newaxis])/data_Z.std(0)[:,np.newaxis]).T
		data_Z = (((data_Z.T-zmu[:,np.newaxis])/zsd[:,np.newaxis]) + (zmu/zsd)[:,np.newaxis]).T
	return data_Z
551
def writefeats2(data,mmix,mask,suffix=''):
	#Write feature versions of components
	# thin wrapper: compute Z-valued feature maps for `mmix` and write
	# them to feats_<suffix>.nii (uses the module-global affine `aff`)
	feats = computefeats2(data,mmix,mask)
	niwrite(unmask(feats,mask),aff,'_'.join(['feats',suffix])+'.nii')
556
def writect(comptable,ctname='',varexpl='-1',classarr=[]):
	"""Write the component classification table `ctname` plus the
	accepted/rejected/midk_rejected text files.

	classarr, if given, is (acc,rej,midk,empty) and updates the module
	globals of the same names. Also reads the module global `nt`.
	"""
	global acc,rej,midk,empty
	if len(classarr)!=0:
		acc,rej,midk,empty = classarr
	nc = comptable.shape[0]
	ts = time.time()
	st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
	sortab = comptable[comptable[:,1].argsort()[::-1],:]   # sort by Kappa, descending
	if ctname=='': ctname = 'comp_table.txt'
	# use context managers so the classification files are closed
	# deterministically (the originals were opened and never closed)
	with open('accepted.txt','w') as f:
		f.write(','.join([str(int(cc)) for cc in acc]))
	with open('rejected.txt','w') as f:
		f.write(','.join([str(int(cc)) for cc in rej]))
	with open('midk_rejected.txt','w') as f:
		f.write(','.join([str(int(cc)) for cc in midk]))
	with open(ctname,'w') as f:
		f.write("#\n#ME-ICA Component statistics table for: %s \n#Run on %s \n#\n" % (os.path.abspath(os.path.curdir),st) )
		f.write("#Dataset variance explained by ICA (VEx): %.02f \n" %  ( varexpl ) )
		f.write("#Total components generated by decomposition (TCo): %i \n" %  ( nc ) )
		f.write("#No. accepted BOLD-like components, i.e. effective degrees of freedom for correlation (lower bound; DFe): %i\n" %  ( len(acc) ) )
		f.write("#Total number of rejected components (RJn): %i\n" %  (len(midk)+len(rej)) )
		f.write("#Nominal degress of freedom in denoised time series (..._medn.nii.gz; DFn): %i \n" %  (nt-len(midk)-len(rej)) )
		f.write("#ACC %s \t#Accepted BOLD-like components\n" % ','.join([str(int(cc)) for cc in acc]) )
		f.write("#REJ %s \t#Rejected non-BOLD components\n" % ','.join([str(int(cc)) for cc in rej]) )
		f.write("#MID %s \t#Rejected R2*-weighted artifacts\n" % ','.join([str(int(cc)) for cc in midk]) )
		f.write("#IGN %s \t#Ignored components (kept in denoised time series)\n" % ','.join([str(int(cc)) for cc in empty]) )
		f.write("#VEx	TCo	DFe	RJn	DFn	\n")
		f.write("##%.02f	%i	%i	%i	%i \n" % (varexpl,nc,len(acc),len(midk)+len(rej),nt-len(midk)-len(rej)))
		f.write("#	comp	Kappa	Rho	%%Var	%%Var(norm)	\n")
		for i in range(nc):
			f.write('%d\t%f\t%f\t%.2f\t%.2f\n'%(sortab[i,0],sortab[i,1],sortab[i,2],sortab[i,3],sortab[i,4]))
585
def writeresults():
	"""Write all outputs: the optimally combined time series, its denoised
	splits, component betas/features, and the component table.

	Runs in the TED output directory; uses module globals catd, t2s, tes,
	mask, aff, comptable, mmix, acc, betas, options.
	"""
	print "++ Writing optimally combined time series"
	ts = optcom(catd,t2s,tes,mask)
	niwrite(ts,aff,'ts_OC.nii')
	print "++ Writing Kappa-filtered optimally combined timeseries"
	varexpl = write_split_ts(ts,comptable,mmix,'OC')
	print "++ Writing signal versions of components"
	ts_B = get_coeffs(ts,mask,mmix)
	niwrite(ts_B[:,:,:,:],aff,'_'.join(['betas','OC'])+'.nii')
	if len(acc)!=0:
		niwrite(ts_B[:,:,:,acc],aff,'_'.join(['betas_hik','OC'])+'.nii')
		print "++ Writing optimally combined high-Kappa features"
		writefeats2(split_ts(ts,comptable,mmix)[0],mmix[:,acc],mask,'OC2')
	print "++ Writing component table"
	writect(comptable,'comp_table.txt',varexpl)
	if options.e2d!=None:
		options.e2d=int(options.e2d)
		print "++ Writing Kappa-filtered TE#%i timeseries" % (options.e2d)
		write_split_ts(catd[:,:,:,options.e2d-1,:],comptable,mmix,'e%i' % options.e2d)
		print "++ Writing high-Kappa TE#%i  features" % (options.e2d)
		# NOTE(review): `betas` here is the module global set in __main__,
		# not a local of this function
		writefeats(betas[:,:,:,options.e2d-1,:],comptable,mmix,'e%i' % options.e2d)
607
608
609
610###################################################################################################
611# 						Begin Main
612###################################################################################################
613
if __name__=='__main__':

	# --- command-line interface ---
	parser=OptionParser()
	parser.add_option('-d',"--orig_data",dest='data',help="Spatially Concatenated Multi-Echo Dataset",default=None)
	parser.add_option('-e',"--TEs",dest='tes',help="Echo times (in ms) ex: 15,39,63",default=None)
	parser.add_option('',"--mix",dest='mixm',help="Mixing matrix. If not provided, ME-PCA & ME-ICA (MDP) is done.",default=None)
	parser.add_option('',"--manacc",dest='manacc',help="Comma separated list of manually accepted components",default=None)
	parser.add_option('',"--kdaw",dest='kdaw',help="Dimensionality augmentation weight (Kappa). Default 10. -1 for low-dimensional ICA",default=10.)
	parser.add_option('',"--rdaw",dest='rdaw',help="Dimensionality augmentation weight (Rho). Default 1. -1 for low-dimensional ICA",default=1.)
	parser.add_option('',"--conv",dest='conv',help="Convergence limit. Default 2.5e-5",default='2.5e-5')
	parser.add_option('',"--sourceTEs",dest='ste',help="Source TEs for models. ex: -ste 2,3 ; -ste 0 for all, -1 for opt. com. Default -1.",default=0-1)
	parser.add_option('',"--denoiseTE",dest='e2d',help="TE to denoise. Default middle",default=None)
	parser.add_option('',"--initcost",dest='initcost',help="Initial cost func. for ICA: pow3,tanh(default),gaus,skew",default='tanh')
	parser.add_option('',"--finalcost",dest='finalcost',help="Final cost func, same opts. as initial",default='tanh')
	parser.add_option('',"--seed",dest='seed',help="Init random number seeds",default=None)
	parser.add_option('',"--stabilize",dest='stabilize',action='store_true',help="Stabilize convergence by reducing dimensionality, for low quality data",default=False)
	parser.add_option('',"--fout",dest='fout',help="Output TE-dependence Kappa/Rho SPMs",action="store_true",default=False)
	parser.add_option('',"--label",dest='label',help="Label for output directory.",default=None)

	(options,args) = parser.parse_args()

	print "-- ME-PCA/ME-ICA Component for ME-ICA %s--" % __version__

	if options.tes==None or options.data==None:
		print "*+ Need at least data and TEs, use -h for help."
		sys.exit()

	# NOTE(review): the three lines below are space-indented inside this
	# tab-indented block -- a TabError under Python 3
        # maybe init seeds
        if options.seed is not None:
                init_random_seeds(int(options.seed))

	print "++ Loading Data"
	# parse comma-separated echo times in ms
	tes = np.fromstring(options.tes,sep=',',dtype=np.float32)
	ne = tes.shape[0]
	catim  = nib.load(options.data)
	head   = catim.get_header()
	head.extensions = []
	head.set_sform(head.get_sform(),code=1)
	aff = catim.get_affine()
	# reshape z-concatenated echoes into (nx,ny,nz,Ne,nt)
	catd = cat2echos(catim.get_data(),ne)
	nx,ny,nz,Ne,nt = catd.shape
	mu  = catd.mean(axis=-1)
	sig  = catd.std(axis=-1)

	"""Parse options, prepare output directory"""
	if options.fout: options.fout = aff
	else: options.fout=None
	kdaw = float(options.kdaw)
	rdaw = float(options.rdaw)
	if options.label!=None: dirname='%s' % '.'.join(['TED',options.label])
	else: dirname='TED'
	os.system('mkdir %s' % dirname)
	if options.mixm!=None:
		# copy a user-supplied mixing matrix into the output directory
		try:
			os.system('cp %s %s/meica_mix.1D; cp %s %s/%s' % (options.mixm,dirname,options.mixm,dirname,os.path.basename(options.mixm)))
		except:
			pass
	os.chdir(dirname)

	print "++ Computing Mask"
	mask  = makemask(catd)

	print "++ Computing T2* map"
	t2s,s0,t2s_fit   = t2smap(catd,mask,tes)
	#Condition values: cap extreme T2* estimates at 10x the 99.5th pctile
	cap_t2s = scoreatpercentile(t2s.flatten(),99.5)
	t2s[t2s>cap_t2s*10]=cap_t2s
	niwrite(s0,aff,'s0v.nii')
	niwrite(t2s,aff,'t2sv.nii')
	niwrite(t2s_fit,aff,'t2sF.nii')

	if options.mixm == None:
		# full decomposition path: PCA reduction then FastICA
		print "++ Doing ME-PCA and ME-ICA with MDP"
		import mdp
		nc,dd = tedpca(options.ste)
		mmix_orig = tedica(dd,cost=options.initcost)
		np.savetxt('__meica_mix.1D',mmix_orig)
		seldict,comptable,betas,mmix = fitmodels_direct(catd,mmix_orig,mask,t2s,tes,options.fout,reindex=True)
		np.savetxt('meica_mix.1D',mmix)
		acc,rej,midk,empty = selcomps(seldict,knobargs=args)
		del dd
	else:
		# reuse a previously computed mixing matrix
		mmix_orig = np.loadtxt('meica_mix.1D')
		eim = eimask(np.float64(fmask(catd,mask)))==1
		eimum = np.array(np.squeeze(unmask(np.array(eim,dtype=np.int).prod(1),mask)),dtype=np.bool)
		seldict,comptable,betas,mmix = fitmodels_direct(catd,mmix_orig,mask,t2s,tes,options.fout)
		acc,rej,midk,empty = selcomps(seldict,knobargs=args)

	if len(acc)==0:
		print "\n** WARNING! No BOLD components detected!!! Please check data and results!\n"

	writeresults()
706