1"""Example of surface-based first-level analysis
2================================================
3
4A full step-by-step example of fitting a GLM to experimental data sampled on
5the cortical surface and visualizing the results.
6
7More specifically:
8
91. A sequence of fMRI volumes is loaded.
102. fMRI data are projected onto a reference cortical surface (the FreeSurfer
11   template, fsaverage).
123. A design matrix describing all the effects related to the data is computed.
134. A GLM is applied to the dataset (effect/covariance, then contrast estimation).
14
15The result of the analysis are statistical maps that are defined on the brain
16mesh. We display them using Nilearn capabilities.
17
18The projection of fMRI data onto a given brain mesh requires that both are
19initially defined in the same space.
20
21* The functional data should be coregistered to the anatomy from which the mesh
22  was obtained.
23
24* Another possibility, used here, is to project the normalized fMRI data to an
25  MNI-coregistered mesh, such as fsaverage.
26
27The advantage of this second approach is that it makes it easy to run
28second-level analyses on the surface. On the other hand, it is obviously less
29accurate than using a subject-tailored mesh.
30
31"""
32
###############################################################################
# Prepare data and analysis parameters
# -------------------------------------
# Prepare the timing parameters.
t_r = 2.4
slice_time_ref = 0.5

###############################################################################
# Prepare the data.
# First, the volume-based fMRI data.
from nilearn.datasets import fetch_localizer_first_level
data = fetch_localizer_first_level()
fmri_img = data.epi_img

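###############################################################################
# As a quick optional check (not part of the original analysis), we can
# confirm that the data form a 4D image whose last dimension is time.
from nilearn.image import load_img
print(load_img(fmri_img).shape)
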
###############################################################################
# Second, the experimental paradigm.
import pandas as pd

events_file = data.events
events = pd.read_table(events_file)

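###############################################################################
# Peeking at the events table (an optional check) should show the onset,
# duration and trial_type columns expected by
# make_first_level_design_matrix below.
print(events.head())
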
###############################################################################
# Project the fMRI image to the surface
# -------------------------------------
#
# For this we need to get a mesh representing the geometry of the surface. We
# could use an individual mesh, but we first resort to a standard mesh, the
# so-called fsaverage5 template from the FreeSurfer software.

from nilearn.datasets import fetch_surf_fsaverage
fsaverage = fetch_surf_fsaverage()

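###############################################################################
# The returned object is a dictionary-like bunch whose entries include, for
# each hemisphere, the pial and inflated meshes plus a sulcal-depth background
# map; we can list them (optional check).
print(sorted(fsaverage.keys()))
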
###############################################################################
# The projection function simply takes the fMRI data and the mesh.
# Note that those correspond spatially, as they are both in MNI space.
from nilearn import surface
texture = surface.vol_to_surf(fmri_img, fsaverage.pial_right)

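###############################################################################
# The resulting texture is a (n_vertices, n_scans) array: one time course per
# mesh vertex (optional check).
print(texture.shape)
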
###############################################################################
# Perform first level analysis
# ----------------------------
#
# This involves computing the design matrix and fitting the model.
# We start by specifying the timing of fMRI frames: each frame is referenced
# to the middle of the acquisition interval, consistent with
# slice_time_ref = 0.5.

import numpy as np
n_scans = texture.shape[1]
frame_times = t_r * (np.arange(n_scans) + slice_time_ref)

###############################################################################
# Create the design matrix.
#
# We specify an HRF model containing the Glover model and its time derivative.
# The drift model is implicitly a cosine basis with a period cutoff at 128s.
from nilearn.glm.first_level import make_first_level_design_matrix
design_matrix = make_first_level_design_matrix(
    frame_times, events=events, hrf_model='glover + derivative')
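###############################################################################
# Before fitting, it can help to visualize the design matrix; this optional
# check uses Nilearn's built-in helper.
from nilearn.plotting import plot_design_matrix
plot_design_matrix(design_matrix)
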
###############################################################################
# Set up and fit the GLM.
#
# Note that the output consists of two variables: `labels` and `estimates`.
# `labels` tags vertices according to noise autocorrelation.
# `estimates` contains the parameter estimates.
# We keep them for later contrast computation.

from nilearn.glm.first_level import run_glm
labels, estimates = run_glm(texture.T, design_matrix.values)

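###############################################################################
# As an optional check: `labels` holds one autocorrelation-class tag per
# vertex, and `estimates` maps each class to its fitted regression results.
print(labels.shape)
print('%i noise classes' % len(estimates))
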
###############################################################################
# Estimate contrasts
# ------------------
# Specify the contrasts.
#
# For practical purposes, we first generate an identity matrix whose size is
# the number of columns of the design matrix.
contrast_matrix = np.eye(design_matrix.shape[1])

###############################################################################
# At first, we create basic contrasts.
basic_contrasts = {column: contrast_matrix[i]
                   for i, column in enumerate(design_matrix.columns)}

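###############################################################################
# The contrast definitions below must use the exact design-matrix column
# names, so listing them first is a useful optional check.
print(design_matrix.columns.tolist())
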
###############################################################################
# Next, we add some intermediate contrasts, starting with
# one contrast pooling all conditions with an auditory component.
basic_contrasts['audio'] = (
    basic_contrasts['audio_left_hand_button_press']
    + basic_contrasts['audio_right_hand_button_press']
    + basic_contrasts['audio_computation']
    + basic_contrasts['sentence_listening'])

# one contrast pooling all conditions with a visual component
basic_contrasts['visual'] = (
    basic_contrasts['visual_left_hand_button_press']
    + basic_contrasts['visual_right_hand_button_press']
    + basic_contrasts['visual_computation']
    + basic_contrasts['sentence_reading'])

# one contrast pooling all conditions involving computation
basic_contrasts['computation'] = (basic_contrasts['visual_computation']
                                  + basic_contrasts['audio_computation'])

# one contrast pooling all conditions involving sentences
basic_contrasts['sentences'] = (basic_contrasts['sentence_listening']
                                + basic_contrasts['sentence_reading'])

###############################################################################
# Finally, we create a dictionary of more relevant contrasts
#
# * 'left - right button press': probes motor activity in left versus right
#   button presses.
# * 'audio - visual': probes the difference of activity between listening to
#   some content and reading the same type of content (instructions, stories).
# * 'computation - sentences': looks at the activity when performing a mental
#   computation task versus simply reading sentences.
#
# Of course, we could define other contrasts, but we keep only three for
# simplicity.

contrasts = {
    'left - right button press': (
        basic_contrasts['audio_left_hand_button_press']
        - basic_contrasts['audio_right_hand_button_press']
        + basic_contrasts['visual_left_hand_button_press']
        - basic_contrasts['visual_right_hand_button_press']
    ),
    'audio - visual': basic_contrasts['audio'] - basic_contrasts['visual'],
    'computation - sentences': (basic_contrasts['computation']
                                - basic_contrasts['sentences'])
}

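###############################################################################
# As an optional sanity check, each contrast vector should have exactly one
# weight per design-matrix column.
for contrast_id, contrast_val in contrasts.items():
    assert len(contrast_val) == design_matrix.shape[1]
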
###############################################################################
# Let's estimate the contrasts by iterating over them.
from nilearn.glm.contrasts import compute_contrast
from nilearn import plotting

for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print('  Contrast %i out of %i: %s, right hemisphere' %
          (index + 1, len(contrasts), contrast_id))
    # compute contrast-related statistics
    contrast = compute_contrast(labels, estimates, contrast_val,
                                contrast_type='t')
    # we present the Z-transform of the t map
    z_score = contrast.z_score()
    # we plot it on the surface, on the inflated fsaverage mesh,
    # together with a suitable background to give an impression
    # of the cortex folding.
    plotting.plot_surf_stat_map(
        fsaverage.infl_right, z_score, hemi='right',
        title=contrast_id, colorbar=True,
        threshold=3., bg_map=fsaverage.sulc_right)

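###############################################################################
# Optionally, each figure can be written to disk instead of being displayed,
# via the `output_file` argument of `plot_surf_stat_map`; as a minimal sketch,
# we save the map of the last contrast computed above under an arbitrary file
# name.
plotting.plot_surf_stat_map(
    fsaverage.infl_right, z_score, hemi='right',
    title=contrast_id, colorbar=True,
    threshold=3., bg_map=fsaverage.sulc_right,
    output_file='right_hemisphere_contrast.png')
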
###############################################################################
# Analysing the left hemisphere
# -----------------------------
#
# Note that re-creating the above analysis for the left hemisphere requires
# little additional code!

###############################################################################
# We project the fMRI data to the left-hemisphere mesh.
texture = surface.vol_to_surf(fmri_img, fsaverage.pial_left)

###############################################################################
# Then we estimate the General Linear Model.
labels, estimates = run_glm(texture.T, design_matrix.values)

###############################################################################
# Finally, we create contrast-specific maps and plot them.
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print('  Contrast %i out of %i: %s, left hemisphere' %
          (index + 1, len(contrasts), contrast_id))
    # compute contrasts
    contrast = compute_contrast(labels, estimates, contrast_val,
                                contrast_type='t')
    z_score = contrast.z_score()
    # plot the result
    plotting.plot_surf_stat_map(
        fsaverage.infl_left, z_score, hemi='left',
        title=contrast_id, colorbar=True,
        threshold=3., bg_map=fsaverage.sulc_left)

plotting.show()

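###############################################################################
# For interactive exploration in a browser or notebook, Nilearn also provides
# `view_surf`; a minimal sketch, applied to the last contrast computed above.
from nilearn.plotting import view_surf
view = view_surf(fsaverage.infl_left, z_score, threshold='90%',
                 bg_map=fsaverage.sulc_left)
# Uncomment to open the interactive view in a web browser:
# view.open_in_browser()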