Hi Ziad,
Thanks for your reply. My understanding is that, with 3dZcutup, the input to 3dDeconvolve is now a set of single-slice datasets (in my case, one per each of my 20 runs), looping over all the slices (we have 35 slices for each TR), instead of the 20 whole-volume runs. But when it goes on to calculate the signal-to-noise ratio, I get a fatal error (different size of voxels). Am I doing the right thing here? The following is my script for 3dZcutup and 3dDeconvolve. Thank you so much!
# Per-slice regression: iterate over all 35 slice indices, zero-padded to
# two digits (00..34) via AFNI's 'count' utility, matching the 35 slices
# per TR mentioned in the message above.
foreach sl (`count -dig 2 0 34`)
# For the current slice, cut that single z-slice out of each run's blurred
# EPI dataset, producing one one-slice dataset per run: zcut<slice>.r<run>+orig
foreach run ( $runs )
3dZcutup -prefix zcut${sl}.r$run -keep $sl $sl pb02.$subj.r$run.blur+orig.HEAD
end
# run the regression analysis
# Input: wildcard over all runs for this slice.
# NOTE(review): 'r??' matches exactly two characters, so this assumes the
# run numbers in $runs are zero-padded (r01..r20) — confirm; a run named
# e.g. 'r1' would not be matched.
# Regressors: 2 task timing files with a BLOCK(2,1) response model, plus
# 6 demeaned motion parameters as baseline (-stim_base) regressors;
# per-run cubic polynomial baseline (-polort 3); TRs removed per the
# combined censor file; one GLT contrasting Large vs Small.
# Outputs (per slice): X matrix, fitted time series, residuals, all betas,
# and the F/t statistics bucket.
3dDeconvolve -input zcut${sl}.r??+orig.HEAD \
-censor censor_${subj}_combined_2.1D \
-polort 3 -float \
-local_times \
-num_stimts 8 \
-stim_times 1 stimuli/large.txt 'BLOCK(2, 1)' \
-stim_label 1 Large \
-stim_times 2 stimuli/small.txt 'BLOCK(2, 1)' \
-stim_label 2 Small \
-stim_file 3 motion_demean.1D'[0]' -stim_base 3 -stim_label 3 roll \
-stim_file 4 motion_demean.1D'[1]' -stim_base 4 -stim_label 4 pitch \
-stim_file 5 motion_demean.1D'[2]' -stim_base 5 -stim_label 5 yaw \
-stim_file 6 motion_demean.1D'[3]' -stim_base 6 -stim_label 6 dS \
-stim_file 7 motion_demean.1D'[4]' -stim_base 7 -stim_label 7 dL \
-stim_file 8 motion_demean.1D'[5]' -stim_base 8 -stim_label 8 dP \
-gltsym 'SYM: Large -Small' \
-glt_label 1 Large-Small \
-jobs 4 \
-fout -tout -x1D X.xmat.1D -xjpeg X.jpg \
-x1D_uncensored X.nocensor.xmat.1D \
-fitts zcut${sl}_fitts \
-errts zcut${sl}_errts \
-cbucket zcut${sl}_all_betas \
-bucket zcut${sl}_stats
# if 3dDeconvolve fails, terminate the script
if ( $status != 0 ) then
echo '---------------------------------------'
echo '** 3dDeconvolve error, failing...'
echo ' (consider the file 3dDeconvolve.err)'
# csh: a bare 'exit' exits with the current value of $status, so the
# nonzero 3dDeconvolve status is propagated to the caller.
exit
endif
end
Best,
Lu