forked from treanus/KUL_NIS
-
Notifications
You must be signed in to change notification settings - Fork 0
/
KUL_preproc_all.sh
executable file
·1823 lines (1345 loc) · 66.3 KB
/
KUL_preproc_all.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/bin/bash
# @ Stefan Sunaert & Ahmed Radwan- UZ/KUL - [email protected]
#
# v0.1 - dd 06/11/2018 - first version
version="v1.1 - dd 21/01/2021"
verbose_level=1
# This is the main script of the KUL_NeuroImaging_Tools
#
# Description:
# This script preprocesses an entire study (multiple subjects) with structural, functional and diffusion data at Stefan's lab
# It will:
# - perform mriqc on structural and functional data
# - perform fmriprep on structural and functional data
# - perform freesurfer on the structural data (only T1w for now)
# - perform mrtrix3 and related processing on dMRI data
# - optionally:
# - perform combined structural and dMRI data analysis (depends on fmriprep, freesurfer and mrtrix3 above)
# - perform fibertractography
#
# Requirements:
# A correct installation of your mac (for now, maybe later also a hpc) at the lab
# - including:
# - dcm2niix
# - dcm2bids
# - docker
# - freesurfer
# - mrtrix
# - last but not least, a correct installation of up-to-date KUL_NeuroImaging_Tools
# - correct setup of your .bashrc and .bash_profile
kul_main_dir=$(dirname "$0")
script=$(basename "$0")
# quoted so an installation path containing spaces still sources correctly
source "$kul_main_dir/KUL_main_functions.sh"
# $cwd & $log_dir is made in main_functions
# A Function to provide Usage information
# - gives information about the script
# Prints the help text to stdout and exits the (sub)shell with status 1.
function Usage {
cat <<USAGE
$(basename "$0") preprocesses an entire study
Usage:
$(basename "$0") -c config_file -b bids_dir
Required arguments:
-c: description of the subjects and settings for processing
Optional arguments:
-b: bids directory
-n: number of cores to use (distributed over mriqc/fmriprep/freesurfer/etc...)
-m: max memory (in gigabytes) available in docker
-t: temporary directory (default = /tmp)
-r: reset docker (clean the images and download new ones)
-v: verbose
-e: expert mode (uses a different config_file format)
Example:
$(basename "$0") -c study_config/subjects_and_options.csv -b BIDS -n 6 -m 12 -t /scratch -v
uses "study_config/subjects_and_options.csv" to
- reads the subjects (participants) on which to do processing
- reads what processing (mriqc/fmriprep/freesurfer/etc...) to do on those
- reads the options to give to mriqc/fmriprep/etc...
uses the (already converted) BIDS data in directory "BIDS"
uses 6 cores in total for all processes (distributed over mriqc/fmriprep/freesurfer/etc...)
uses a memory of 12 GB
- for fmriprep & mriqc
- (set this option equal to, or slightly less than what you specify in your docker preferences)
specifies that temporary data are written to /scratch
spits out more verbose logging to the terminal
USAGE
exit 1
}
# A Function to start mriqc processing (in parallel)
function task_mriqc_participant {
# Runs mriqc for $BIDS_participant, either via singularity (when
# KUL_use_mriqc_singularity=1) or docker, OR - when
# make_pbs_files_instead_of_running=1 - writes a PBS job file plus a csv
# parameter line for the VSC cluster instead of running locally.
# Globals (read): BIDS_participant, mriqc_options, ncpu_mriqc, ncpu_mriqc_ants,
#   mem_gb, bids_dir, cwd, log_dir, log, kul_main_dir, pbs_* settings
# Globals (written): mriqc_pid (when run locally), task_command, pbs_data_file content
# check whether to use singularity-mriqc
mriqc_singularity=0
#echo $KUL_use_mriqc_singularity
if [ -z $KUL_use_mriqc_singularity ]; then
kul_echo " KUL_use_mriqc_singularity not set, using docker"
elif [ $KUL_use_mriqc_singularity -eq 1 ]; then
kul_echo " KUL_use_mriqc_singularity is set to 1, using it"
mriqc_singularity=1
fi
#echo $mriqc_singularity
# the participant list may contain spaces -> make it filename-safe for the logs
mriqc_log_p=$(echo ${BIDS_participant} | sed -e 's/ /_/g' )
mriqc_log="${log_dir}/mriqc_${mriqc_log_p}.txt"
mriqc_act="${log_dir}/mriqc_${mriqc_log_p}_act"
#mkdir -p ${preproc}/log/mriqc
kul_e2cl " started (in parallel) mriqc on participant(s) $BIDS_participant (with options $mriqc_options, using $ncpu_mriqc cores, logging to $mriqc_log)" $log
if [ $mriqc_singularity -eq 1 ]; then
mkdir -p ./mriqc
mkdir -p ./mriqc_work_${mriqc_log_p}
# the command is only built as a string here; it is eval'ed further below
local task_mriqc_cmd=$(echo "singularity run --cleanenv \
-B ${cwd}:/work \
$KUL_mriqc_singularity \
--participant_label $BIDS_participant \
$mriqc_options \
-w /work/mriqc_work_${mriqc_log_p} \
--n_procs $ncpu_mriqc --ants-nthreads $ncpu_mriqc_ants --mem_gb $mem_gb --no-sub \
/work/${bids_dir} /work/mriqc participant \
> $mriqc_log 2>&1 ")
else
# NOTE(review): mriqc_home is created here but removed again right after the
# command STRING is built (the docker run only happens at the eval below), so
# docker recreates the mount point itself - confirm this is intended.
mkdir ${cwd}/mriqc_home
local task_mriqc_cmd=$(echo "docker run -u $(id -u) --tmpfs /run --tmpfs /tmp --rm \
-v ${cwd}/mriqc_home:/home/bidsapp/ \
-v ${cwd}/${bids_dir}:/data -v ${cwd}/mriqc:/out \
poldracklab/mriqc:latest \
--participant_label $BIDS_participant \
$mriqc_options \
--n_procs $ncpu_mriqc --ants-nthreads $ncpu_mriqc_ants --mem_gb $mem_gb --no-sub \
/data /out participant \
> $mriqc_log 2>&1 ")
rm -rf ${cwd}/mriqc_home
fi
kul_echo " using cmd: $task_mriqc_cmd"
# now we start the parallel job
if [ $make_pbs_files_instead_of_running -eq 0 ]; then
eval $task_mriqc_cmd &
mriqc_pid="$!"
kul_echo " mriqc pid is $mriqc_pid"
sleep 2
#psrecord $mriqc_pid --log $mriqc_act.txt --plot $mriqc_act.png --interval 30 &
else
kul_echo " making a PBS file"
mkdir -p VSC
# echo $task_mriqc_cmd > VSC/pbs_task_mriqc.txt
# template command: the \$vars stay literal here so the PBS job can expand
# them at runtime (presumably from the csv written below - verify in master.pbs)
task_command=$(echo "singularity run --cleanenv \
-B \${cwd}:/work \
-B \${cwd}:/data \
-B \${cwd}:/out \
\$KUL_mriqc_singularity \
--participant_label \$BIDS_participant \
\$mriqc_options \
-w /work/mriqc_work_\${mriqc_log_p} \
--n_procs \$ncpu_mriqc --ants-nthreads \$ncpu_mriqc_ants --mem_gb \$mem_gb --no-sub \
/work/\${bids_dir} /work/mriqc participant \
> \$mriqc_log 2>&1 ")
#chmod +x VSC/pbs_task_mriqc.sh
# fill in the ##PLACEHOLDERS## of the master PBS template; each value is
# sed-escaped first so special characters survive the perl substitution
cp $kul_main_dir/VSC/master.pbs VSC/run_mriqc.pbs
perl -pi -e "s/##LP##/${pbs_lp}/g" VSC/run_mriqc.pbs
perl -pi -e "s/##CPU##/${pbs_cpu}/g" VSC/run_mriqc.pbs
perl -pi -e "s/##MEM##/${pbs_mem}/g" VSC/run_mriqc.pbs
esc_pbs_email=$(echo $pbs_email | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##EMAIL##/${esc_pbs_email}/g" VSC/run_mriqc.pbs
esc_pbs_walltime=$(echo $pbs_walltime | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##WALLTIME##/${esc_pbs_walltime}/g" VSC/run_mriqc.pbs
esc_pbs_singularity_fmriprep=$(echo $pbs_singularity_fmriprep | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##FMRIPREP##/${esc_pbs_singularity_fmriprep}/g" VSC/run_mriqc.pbs
esc_pbs_singularity_mriqc=$(echo $pbs_singularity_mriqc | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##MRIQC##/${esc_pbs_singularity_mriqc}/g" VSC/run_mriqc.pbs
esc_task_command=$(echo $task_command | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##COMMAND##/${esc_task_command}/g" VSC/run_mriqc.pbs
kul_echo $pbs_data_file
# append this participant's parameters to the csv (header first if new)
if [ ! -f $pbs_data_file ]; then
echo "cwd,BIDS_participant,mriqc_options,mriqc_log_p,ncpu_mriqc,ncpu_mriqc_ants,mem_gb,bids_dir,mriqc_log" > $pbs_data_file
fi
echo "$cwd,$BIDS_participant,$mriqc_options,$mriqc_log_p,$ncpu_mriqc,$ncpu_mriqc_ants,$mem_gb,$bids_dir,$mriqc_log" >> $pbs_data_file
fi
}
# A function to start fmriprep processing (in parallel)
function task_fmriprep {
# Builds the fmriprep command for $BIDS_participant (singularity when
# KUL_use_fmriprep_singularity=1, docker otherwise) and - when
# make_pbs_files_instead_of_running=1 - generates a PBS job file plus a csv
# parameter line for the VSC cluster.
# NOTE(review): the locally built command is never eval'ed (the run block
# below is commented out), so when make_pbs_files_instead_of_running=0 this
# function currently does nothing but build strings - confirm intended.
# Globals (read): BIDS_participant, bids_dir, cwd, log_dir, ncpu_fmriprep,
#   ncpu_fmriprep_ants, mem_mb, fmriprep_options, freesurfer_license,
#   fmriprep_filter_mount, fmriprep_version, kul_main_dir, pbs_* settings
# Globals (written): task_fmriprep_cmd, task_command, pbs_data_file content
# make log dir and clean_up before starting
#mkdir -p ${preproc}/log/fmriprep
#rm -fr ${cwd}/fmriprep_work_${fmriprep_log_p}
# check whether to use singularity-fmriprep
fmriprep_singularity=0
#echo $KUL_use_fmriprep_singularity
if [ -z $KUL_use_fmriprep_singularity ]; then
kul_echo " KUL_use_fmriprep_singularity not set, using docker"
elif [ $KUL_use_fmriprep_singularity -eq 1 ]; then
kul_echo " KUL_use_fmriprep_singularity is set to 1, using it"
fmriprep_singularity=1
fi
#echo $fmriprep_singularity
# the participant list may contain spaces -> make it filename-safe for the logs
fmriprep_log_p=$(echo ${BIDS_participant} | sed -e 's/ /_/g' )
fmriprep_log=${log_dir}/fmriprep_${fmriprep_log_p}.txt
fmriprep_act=${log_dir}/fmriprep_${fmriprep_log_p}_act
#kul_e2cl " started (in parallel) fmriprep on participant ${BIDS_participant}... (with options $fmriprep_options, using $ncpu_fmriprep cores, logging to $fmriprep_log)" ${log}
if [ $fmriprep_singularity -eq 1 ]; then
mkdir -p ./fmriprep_work_${fmriprep_log_p}
local task_fmriprep_cmd=$(echo "singularity run --cleanenv \
-B ./fmriprep_work_${fmriprep_log_p}:/work \
-B .:/data \
-B .:/out \
-B ${freesurfer_license}:/opt/freesurfer/license.txt \
$KUL_fmriprep_singularity \
/data/${bids_dir} \
/out \
participant \
--participant_label ${BIDS_participant} \
-w /work \
--nthreads $ncpu_fmriprep --omp-nthreads $ncpu_fmriprep_ants \
--mem $mem_mb \
$fmriprep_options \
> $fmriprep_log 2>&1")
else
mkdir -p ${cwd}/fmriprep_work
task_fmriprep_cmd=$(echo "docker run --rm -u $(id -u) \
-v ${cwd}/${bids_dir}:/data \
-v ${cwd}:/out \
-v ${cwd}/fmriprep_work:/work \
-v ${freesurfer_license}:/opt/freesurfer/license.txt \
$fmriprep_filter_mount \
nipreps/fmriprep:${fmriprep_version} \
/data /out \
participant \
--participant_label ${BIDS_participant} \
-w /work \
--nthreads $ncpu_fmriprep --omp-nthreads $ncpu_fmriprep_ants \
--mem $mem_mb \
$fmriprep_options")
# > $fmriprep_log 2>&1")
fi
#kul_echo " using cmd: $task_fmriprep_cmd"
# Now start the parallel job
# echo $make_pbs_files_instead_of_running
if [ $make_pbs_files_instead_of_running -eq 1 ]; then
#eval $task_fmriprep_cmd &
#fmriprep_pid="$!"
#kul_echo " fmriprep pid is $fmriprep_pid"
#sleep 2
#psrecord $fmriprep_pid --include-children --log $fmriprep_act.txt --plot $fmriprep_act.png --interval 30 &
#else
kul_echo " making a PBS file"
mkdir -p VSC
# template command: the \$vars stay literal so the PBS job expands them at
# runtime. NOTE(review): the '-B .:/out' line below is missing its trailing
# line-continuation backslash, and the image args are 'data'/'out' instead of
# '/data'/'/out' - verify the generated PBS command actually works.
task_command=$(echo "mkdir -p ./fmriprep_work_\${fmriprep_log_p}; \
singularity run --cleanenv \
-B ./\${bids_dir}:/data \
-B .:/out
-B ./fmriprep_work_\${fmriprep_log_p}:/work \
-B \$FS_LICENSE:/opt/freesurfer/license.txt \
\$KUL_fmriprep_singularity \
data \
out \
participant \
--participant_label \${BIDS_participant} \
-w /work \
--nthreads \$ncpu_fmriprep --omp-nthreads \$ncpu_fmriprep_ants \
--mem \$mem_mb \
\$fmriprep_options \
> \$fmriprep_log 2>&1")
# fill in the ##PLACEHOLDERS## of the master PBS template; each value is
# sed-escaped first so special characters survive the perl substitution
cp $kul_main_dir/VSC/master.pbs VSC/run_fmriprep.pbs
perl -pi -e "s/##LP##/${pbs_lp}/g" VSC/run_fmriprep.pbs
perl -pi -e "s/##CPU##/${pbs_cpu}/g" VSC/run_fmriprep.pbs
perl -pi -e "s/##MEM##/${pbs_mem}/g" VSC/run_fmriprep.pbs
perl -pi -e "s/##PARTITION##/${pbs_partition}/g" VSC/run_fmriprep.pbs
esc_pbs_email=$(echo $pbs_email | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##EMAIL##/${esc_pbs_email}/g" VSC/run_fmriprep.pbs
esc_pbs_walltime=$(echo $pbs_walltime | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##WALLTIME##/${esc_pbs_walltime}/g" VSC/run_fmriprep.pbs
esc_pbs_singularity_fmriprep=$(echo $pbs_singularity_fmriprep | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##FMRIPREP##/${esc_pbs_singularity_fmriprep}/g" VSC/run_fmriprep.pbs
esc_pbs_singularity_mriqc=$(echo $pbs_singularity_mriqc | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##MRIQC##/${esc_pbs_singularity_mriqc}/g" VSC/run_fmriprep.pbs
esc_task_command=$(echo $task_command | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##COMMAND##/${esc_task_command}/g" VSC/run_fmriprep.pbs
kul_echo $pbs_data_file
# append this participant's parameters to the csv (header first if new)
if [ ! -f $pbs_data_file ]; then
echo "BIDS_participant,fmriprep_log_p,bids_dir,ncpu_fmriprep,ncpu_fmriprep_ants,mem_mb,fmriprep_options,fmriprep_log" > $pbs_data_file
fi
echo "$BIDS_participant,$fmriprep_log_p,$bids_dir,$ncpu_fmriprep,$ncpu_fmriprep_ants,$mem_mb,$fmriprep_options,$fmriprep_log" >> $pbs_data_file
fi
#kul_e2cl " done fmriprep on participant $BIDS_participant" $log
}
# A Function to start freesurfer processing (in parallel)
function task_freesurfer {
# Prepares the recon-all command for $BIDS_participant, skipping participants
# whose "done" marker already exists.
# NOTE(review): the eval that actually launches recon-all is commented out in
# the original; this function currently only builds $task_freesurfer_cmd and
# sets up SUBJECTS_DIR - confirm intended.
# Globals (read): BIDS_participant, freesurfer_store_in_derivatives, bids_dir,
#   cwd, log_dir, log, ncpu_freesurfer, freesurfer_options, fs_options_direct
# Globals (written): SUBJECTS_DIR (exported), task_freesurfer_cmd, notify_file
# check if already performed freesurfer
if [ $freesurfer_store_in_derivatives -eq 1 ];then
freesurfer_file_to_check=BIDS/derivatives/freesurfer/${BIDS_participant}_freesurfer_is.done
else
freesurfer_file_to_check=freesurfer/sub-${BIDS_participant}/${BIDS_participant}/scripts/recon-all.done
fi
if [ ! -f $freesurfer_file_to_check ]; then
freesurfer_log=${log_dir}/freesurfer_${BIDS_participant}.txt
#mkdir -p ${preproc}/log/freesurfer
kul_e2cl " started (in parallel) freesurfer recon-all on participant ${BIDS_participant}... (using $ncpu_freesurfer cores, logging to $freesurfer_log)" ${log}
# find all T1w images across sessions (excluding gadolinium-enhanced scans)
search_sessions=($(find BIDS/sub-${BIDS_participant} -name "*_T1w.nii.gz" ! -name "*gadolinium*"))
num_sessions=${#search_sessions[@]}
kul_echo " Freesurfer processing: number T1w data in the BIDS folder: $num_sessions"
kul_echo " notably: ${search_sessions[@]}"
# make the freesurfer input string (one -i per T1w)
freesurfer_invol=""
for i in `seq 0 $(($num_sessions-1))`; do
freesurfer_invol=" $freesurfer_invol -i ${search_sessions[$i]} "
done
# test for options
# -useflair
fs_use_flair=""
if [[ $freesurfer_options =~ "-useflair" ]]; then
kul_echo " Option -useflair given"
# search if any FLAIR images exist
search_sessions_flair=($(find BIDS/sub-${BIDS_participant} -type f | grep FLAIR.nii.gz))
num_sessions_flair=${#search_sessions_flair[@]}
if [ $num_sessions_flair -gt 0 ]; then
kul_echo " Freesurfer processing: number of FLAIR data in the BIDS folder: $num_sessions_flair"
kul_echo " notably: ${search_sessions_flair[@]}"
# make the freesurfer input string (one -FLAIR per FLAIR image)
freesurfer_invol_flair=""
# BUGFIX: loop over the FLAIR count (was $num_sessions, the T1w count)
for i in `seq 0 $(($num_sessions_flair-1))`; do
freesurfer_invol_flair=" $freesurfer_invol_flair -FLAIR ${search_sessions_flair[$i]} "
done
fs_use_flair=" $freesurfer_invol_flair -FLAIRpial "
fi
fi
# -fs_hippoT1T2
fs_hippoT1T2=""
if [[ $freesurfer_options =~ "-hippocampal-subfields-T1T2" ]]; then
kul_echo " Option -hippocampal-subfields-T1T2 given"
# search if any FLAIR sessions exist
search_sessions_flair2=($(find BIDS/sub-${BIDS_participant} -type f | grep FLAIR.nii.gz))
# BUGFIX: count the flair2 array (was counting search_sessions_flair)
num_sessions_flair2=${#search_sessions_flair2[@]}
if [ $num_sessions_flair2 -gt 0 ]; then
kul_echo " Freesurfer processing: number of FLAIR data in the BIDS folder: $num_sessions_flair2"
kul_echo " notably: ${search_sessions_flair2[@]}"
# make the freesurfer input string
freesurfer_invol_flair2=""
# BUGFIX: loop over the FLAIR count (was $num_sessions, the T1w count)
for i in `seq 0 $(($num_sessions_flair2-1))`; do
freesurfer_invol_flair2=" $freesurfer_invol_flair2 -hippocampal-subfields-T1T2 ${search_sessions_flair2[$i]} FLAIR-${i}"
done
fs_hippoT1T2=" $freesurfer_invol_flair2 -itkthreads $ncpu_freesurfer "
fi
fi
#mkdir -p freesurfer
# choose where the freesurfer output lives (BIDS derivatives vs own folder)
if [ $freesurfer_store_in_derivatives -eq 1 ];then
SUBJECTS_DIR="${cwd}/${bids_dir}/derivatives/freesurfer"
mkdir -p ${SUBJECTS_DIR}
fs_BIDS_participant="sub-$BIDS_participant"
notify_file=${SUBJECTS_DIR}/${BIDS_participant}_freesurfer_is.done
else
SUBJECTS_DIR=${cwd}/freesurfer/sub-${BIDS_participant}
fs_BIDS_participant=$BIDS_participant
notify_file=${SUBJECTS_DIR}_freesurfer_is.done
#start clean
rm -rf $SUBJECTS_DIR
mkdir -p $SUBJECTS_DIR
fi
export SUBJECTS_DIR
#echo $notify_file
task_freesurfer_cmd=$(echo "recon-all -subject $fs_BIDS_participant $freesurfer_invol \
$fs_use_flair $fs_hippoT1T2 $fs_options_direct -all -openmp $ncpu_freesurfer \
-parallel -notify $notify_file")
# > $freesurfer_log 2>&1 ")
#kul_echo " using cmd: $task_freesurfer_cmd"
#eval $task_freesurfer_cmd &
#freesurfer_pid="$!"
#kul_echo " freesurfer pid is $freesurfer_pid"
#sleep 2
#kul_e2cl " done freesufer on participant $BIDS_participant" $log
else
#freesurfer_pid=-1
# typo fix: "subjet" -> "subject"
kul_echo " freesurfer of subject $BIDS_participant already done, skipping..."
fi
}
# A function to start KUL_dwiprep processing (in parallel)
function task_KUL_dwiprep {
# Prepares the KUL_dwiprep.sh call for $BIDS_participant; when
# make_pbs_files_instead_of_running=1 it writes a PBS job file plus a csv
# parameter line for the VSC cluster.
# NOTE(review): when make_pbs_files_instead_of_running=0 nothing is executed
# (the local eval below is commented out) - confirm intended.
# Globals (read): BIDS_participant, ncpu_dwiprep, dwipreproc_options,
#   eddy_options, synbzero_disco_instead_of_topup, rev_phase_for_topup_only,
#   dwi2mask_method, log_dir, kul_main_dir, pbs_* settings
# Globals (written): extra_options_synb0/revphase/dwi2mask, task_command,
#   pbs_data_file, dwiprep_pid (-1 when skipped)
# check if already performed KUL_dwiprep
dwiprep_file_to_check=dwiprep/sub-${BIDS_participant}/dwiprep_is_done.log
if [ ! -f $dwiprep_file_to_check ]; then
dwiprep_log=$log_dir/dwiprep_${BIDS_participant}.txt
#mkdir -p ${preproc}/log/dwiprep
#kul_e2cl " started (in parallel) KUL_dwiprep on participant ${BIDS_participant}... (using $ncpu_dwiprep cores, logging to $dwiprep_log)" ${log}
# translate the config switches into KUL_dwiprep.sh flags
extra_options_synb0=""
if [ "$synbzero_disco_instead_of_topup" -eq 1 ]; then
extra_options_synb0=" -b "
fi
extra_options_revphase=""
if [ "$rev_phase_for_topup_only" -eq 1 ]; then
extra_options_revphase=" -r "
fi
extra_options_dwi2mask=""
if [ "$dwi2mask_method" -gt 0 ]; then
extra_options_dwi2mask=" -m $dwi2mask_method "
fi
task_dwiprep_cmd=$(echo "KUL_dwiprep.sh -p ${BIDS_participant} \
$extra_options_dwi2mask $extra_options_synb0 $extra_options_revphase -n $ncpu_dwiprep \
-d \"$dwipreproc_options\" -e \"${eddy_options} \" -v 1")
# > $dwiprep_log 2>&1 ")
#kul_echo " using cmd: $task_dwiprep_cmd"
if [ $make_pbs_files_instead_of_running -eq 1 ]; then
# Now we start the parallel job
#eval $task_dwiprep_cmd &
#dwiprep_pid="$!"
#kul_echo " KUL_dwiprep pid is $dwiprep_pid"
#sleep 2
#else
kul_echo " making a PBS file"
mkdir -p VSC
cp $kul_main_dir/VSC/master_dwiprep.pbs VSC/run_dwiprep.pbs
# template command: \${BIDS_participant} and \$dwiprep_log stay literal so
# the PBS job expands them at runtime from the csv written below
task_command=$(echo "KUL_dwiprep.sh -p \${BIDS_participant} \
$extra_options_dwi2mask $extra_options_synb0 $extra_options_revphase -n $ncpu_dwiprep \
-d \"$dwipreproc_options\" -e \"${eddy_options} \" -v 1 \
> \$dwiprep_log 2>&1 ")
kul_echo $task_command
# fill in the ##PLACEHOLDERS## of the PBS template; each value is sed-escaped
# first so special characters survive the perl substitution
perl -pi -e "s/##LP##/${pbs_lp}/g" VSC/run_dwiprep.pbs
perl -pi -e "s/##CPU##/${pbs_cpu}/g" VSC/run_dwiprep.pbs
perl -pi -e "s/##MEM##/${pbs_mem}/g" VSC/run_dwiprep.pbs
perl -pi -e "s/##PARTITION##/${pbs_partition}/g" VSC/run_dwiprep.pbs
esc_pbs_email=$(echo $pbs_email | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##EMAIL##/${esc_pbs_email}/g" VSC/run_dwiprep.pbs
esc_pbs_walltime=$(echo $pbs_walltime | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##WALLTIME##/${esc_pbs_walltime}/g" VSC/run_dwiprep.pbs
esc_pbs_singularity_fmriprep=$(echo $pbs_singularity_fmriprep | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##FMRIPREP##/${esc_pbs_singularity_fmriprep}/g" VSC/run_dwiprep.pbs
esc_task_command=$(echo $task_command | sed 's#\([]\!\(\)\#\%\@\*\$\/&\-\=[]\)#\\\1#g')
perl -pi -e "s/##COMMAND##/${esc_task_command}/g" VSC/run_dwiprep.pbs
pbs_data_file=VSC/pbs_data_dwiprep.csv
kul_echo $pbs_data_file
# append this participant to the csv (header first if the file is new)
if [ ! -f $pbs_data_file ]; then
echo "BIDS_participant, dwiprep_log" > $pbs_data_file
fi
echo "$BIDS_participant, $dwiprep_log" >> $pbs_data_file
fi
else
dwiprep_pid=-1
kul_echo " KUL_dwiprep of participant $BIDS_participant already done, skipping..."
fi
}
# A Function to start KUL_dwiprep_anat processing
function task_KUL_dwiprep_anat {
# Runs KUL_dwiprep_anat.sh for $BIDS_participant in the background, unless
# its "done" marker file shows it already completed.
# Globals (read): BIDS_participant, dwiprep_anat_ncpu, log_dir, log
# Globals (written): dwiprep_anat_pid (pid of the background job, -1 when skipped)
# check if already performed KUL_dwiprep_anat
dwiprep_anat_file_to_check=dwiprep/sub-${BIDS_participant}/dwiprep_anat_is_done.log
if [ ! -f "$dwiprep_anat_file_to_check" ]; then
dwiprep_anat_log=${log_dir}/dwiprep_anat_${BIDS_participant}.txt
kul_e2cl " performing KUL_dwiprep_anat on subject ${BIDS_participant}... (using $dwiprep_anat_ncpu cores, logging to $dwiprep_anat_log)" ${log}
KUL_dwiprep_anat.sh -p ${BIDS_participant} -n $dwiprep_anat_ncpu -v \
> $dwiprep_anat_log 2>&1 &
dwiprep_anat_pid="$!"
kul_echo " KUL_dwiprep_anat pid is $dwiprep_anat_pid"
else
dwiprep_anat_pid=-1
# typo fix in message: "subjet" -> "subject"
kul_echo " KUL_dwiprep_anat of subject $BIDS_participant already done, skipping..."
fi
}
# A Function to start KUL_dwiprep_MNI processing
function task_KUL_dwiprep_MNI {
# Runs KUL_dwiprep_MNI.sh for $BIDS_participant in the background, unless
# its "done" marker file shows it already completed.
# Globals (read): BIDS_participant, dwiprep_MNI_ncpu, log_dir, log
# Globals (written): dwiprep_MNI_pid (pid of the background job, -1 when skipped)
# check if already performed KUL_dwiprep_MNI
dwiprep_MNI_file_to_check=dwiprep/sub-${BIDS_participant}/dwiprep_MNI_is_done.log
if [ ! -f "$dwiprep_MNI_file_to_check" ]; then
dwiprep_MNI_log=${log_dir}/dwiprep_MNI_${BIDS_participant}.txt
kul_e2cl " performing KUL_dwiprep_MNI on subject ${BIDS_participant}... (using $dwiprep_MNI_ncpu cores, logging to $dwiprep_MNI_log)" ${log}
KUL_dwiprep_MNI.sh -p ${BIDS_participant} -n $dwiprep_MNI_ncpu -v \
> $dwiprep_MNI_log 2>&1 &
dwiprep_MNI_pid="$!"
kul_echo " KUL_dwiprep_MNI pid is $dwiprep_MNI_pid"
else
dwiprep_MNI_pid=-1
# typo fix in message: "subjet" -> "subject"
kul_echo " KUL_dwiprep_MNI of subject $BIDS_participant already done, skipping..."
fi
}
# A Function to start KUL_dwiprep_fibertract processing
function task_KUL_dwiprep_fibertract {
# Runs KUL_dwiprep_fibertract.sh (synchronously, via eval) for
# $BIDS_participant unless its "done" marker file already exists.
# Globals (read): BIDS_participant, dwiprep_fibertract_ncpu,
#   dwiprep_fibertract_whole_brain, dwiprep_fibertract_response_file,
#   dwiprep_fibertract_conf_file, dwiprep_fibertract_rois_file, log_dir, log
# check if already performed KUL_dwiprep_fibertract
# (comment fix: this previously said "KUL_dwiprep_drtdbs" - a copy/paste slip)
dwiprep_fibertract_file_to_check=dwiprep/sub-${BIDS_participant}/dwiprep_fibertract_is_done.log
if [ ! -f "$dwiprep_fibertract_file_to_check" ]; then
dwiprep_fibertract_log=${log_dir}/dwiprep_fibertract_${BIDS_participant}.txt
kul_e2cl " performing KUL_dwiprep_fibertract on subject ${BIDS_participant}... (using $dwiprep_fibertract_ncpu cores, logging to $dwiprep_fibertract_log)" ${log}
# -f flag = whole-brain fibertractography
fibertract_wb_flag=""
if [ $dwiprep_fibertract_whole_brain -eq 1 ]; then
fibertract_wb_flag=" -f "
fi
local task_dwiprep_fibertract_cmd=$(echo "KUL_dwiprep_fibertract.sh -p ${BIDS_participant} \
$fibertract_wb_flag -n $dwiprep_fibertract_ncpu -v \
-w $dwiprep_fibertract_response_file \
-c $dwiprep_fibertract_conf_file -r $dwiprep_fibertract_rois_file \
> $dwiprep_fibertract_log 2>&1 ")
kul_echo " using cmd: $task_dwiprep_fibertract_cmd"
eval $task_dwiprep_fibertract_cmd
else
# typo fix in message: "subjet" -> "subject"
kul_echo " KUL_dwiprep_fibertract of subject $BIDS_participant already done, skipping..."
fi
}
# A function to start KUL_synb0 processing (in parallel)
function task_KUL_synb0 {
# Runs KUL_synb0.sh for $BIDS_participant in the background unless its
# "done" marker file already exists.
# Globals (read): BIDS_participant, ncpu_synb0, cleanup_synb0, log_dir, log,
#   make_pbs_files_instead_of_running
# Globals (written): synb0_pid (pid of the background job, -1 when skipped)
# check if already performed KUL_synb0
synb0_file_to_check=synb0/sub-${BIDS_participant}/synb0_is_done.log
if [ ! -f $synb0_file_to_check ]; then
synb0_log=$log_dir/synb0_${BIDS_participant}.txt
#mkdir -p ${preproc}/log/synb0
kul_e2cl " started (in parallel) KUL_synb0 on participant ${BIDS_participant}... (using $ncpu_synb0 cores, logging to $synb0_log)" ${log}
# BUGFIX: reset first - extra_options_synb0 is a global that task_KUL_dwiprep
# may have set to " -b ", which would otherwise leak into the KUL_synb0 call
extra_options_synb0=""
if [ "$cleanup_synb0" -eq 1 ]; then
extra_options_synb0=" -c "
fi
local task_synb0_cmd=$(echo "KUL_synb0.sh -p ${BIDS_participant} $extra_options_synb0 -n $ncpu_synb0 -v \
> $synb0_log 2>&1 ")
kul_echo " using cmd: $task_synb0_cmd"
if [ $make_pbs_files_instead_of_running -eq 0 ]; then
# Now we start the parallel job
eval $task_synb0_cmd &
synb0_pid="$!"
kul_echo " KUL_synb0 pid is $synb0_pid"
sleep 2
else
echo "not yet implemented"
# still to do
fi
else
synb0_pid=-1
# message fix: previously said "KUL_dwiprep", but this is the KUL_synb0 task
kul_echo " KUL_synb0 of participant $BIDS_participant already done, skipping..."
fi
}
# Waits for a set of background processes to finish, reporting the exit status
# of each as it completes and a "still running" note roughly every 20 minutes.
# Globals (read): waitforpids  - array of pids to wait for
#                 waitforprocs - array of task names, parallel to waitforpids
# Outputs: per-process success/failure messages on stdout
# Returns: 0 (failures are reported in the output, not the return status)
function WaitForTaskCompletion {
# BUGFIX: copy the globals as real arrays; the previous scalar assignment
# ('local pidsArray=${waitforpids[@]}') joined all pids into ONE string, so
# multi-pid monitoring only worked by accident of word-splitting.
local pidsArray=("${waitforpids[@]}")
local procsArray=("${waitforprocs[@]}")
local log_ttime=0            # last time a standby message was printed
local seconds_begin=$SECONDS # seconds since the beginning of the script
local exec_time=0            # seconds since the beginning of this function
local errorcount=0           # number of pids that finished with errors
local every_time=1201        # standby-message interval in seconds
local c result log_min pid
local newPidsArray newProcsArray
while [ ${#pidsArray[@]} -gt 0 ]; do
newPidsArray=()
newProcsArray=()
c=0
for pid in "${pidsArray[@]}"; do
if kill -0 "$pid" > /dev/null 2>&1; then
# still alive: keep monitoring it
newPidsArray+=("$pid")
newProcsArray+=("${procsArray[c]}")
else
# finished: reap it and report its exit status
wait "$pid"
result=$?
if [ $result -ne 0 ]; then
errorcount=$((errorcount+1))
echo " *** WARNING! **** Process ${procsArray[c]} with pid $pid FAILED (with exitcode [$result]). Check the log-file"
else
echo " Process ${procsArray[c]} with pid $pid finished successfully (with exitcode [$result])."
fi
fi
c=$((c+1))
done
# Log a standby message every $every_time seconds
exec_time=$(($SECONDS - $seconds_begin))
if [ $((($exec_time + 1) % $every_time)) -eq 0 ]; then
if [ $log_ttime -ne $exec_time ]; then
log_ttime=$exec_time
log_min=$((log_ttime / 60))
echo " Current tasks [${procsArray[@]}] still running after $log_min minutes with pids [${pidsArray[@]}]."
fi
fi
pidsArray=("${newPidsArray[@]}")
procsArray=("${newProcsArray[@]}")
# only sleep when something is still running (avoids a pointless final 1s wait)
if [ ${#pidsArray[@]} -gt 0 ]; then
sleep 1
fi
done
return 0
}
# end of local function --------------
# ------ MAIN STARTS HERE-----------------------------------
# Set some defaults
# silent=1 -> quiet terminal output; the -v option sets silent=0/verbose_level=2
silent=1
verbose_level=1
# total cores to distribute over the tools (-n), docker memory cap in GB (-m)
ncpu=6
mem_gb=24
bids_dir=BIDS
# expert=1 -> read settings from an expert-mode config file (-e)
expert=0
# 1 -> only generate VSC/PBS job files instead of running locally
make_pbs_files_instead_of_running=0
tmp=/tmp
# Set flags (track which options were explicitly given on the command line)
conf_flag=0
bids_flag=0
tmp_flag=0
cpu_flag=0
mem_flag=0
docker_reset_flag=0
# Check command line options, and return function Usage if required options are not given
if [ "$#" -lt 2 ]; then
Usage >&2
exit 1
else
while getopts "c:b:n:m:t:ervh" OPT; do
case $OPT in
c) #config_file
conf_flag=1
conf=$OPTARG
;;
b) #bids_dir
bids_flag=1
bids_dir=$OPTARG
;;
n) #ncpu
cpu_flag=1
ncpu=$OPTARG
;;
m) #max memory in GB (comment fix: previously mislabelled "bids_dir")
mem_flag=1
mem_gb=$OPTARG
;;
t) #temporary directory
tmp_flag=1
tmp=$OPTARG
;;
r) #reset docker
docker_reset_flag=1
;;
v) #verbose
silent=0
verbose_level=2
;;
e) #expert
expert=1
;;
h) #help
Usage >&2
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
echo
Usage >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
echo
Usage >&2
exit 1
;;
esac
done
fi
# check for required options: a config file (-c) must be given and must exist
if [ $conf_flag -eq 0 ] ; then
echo
echo "Option -c is required: give the path to the file that describes the subjects" >&2
echo
exit 2
elif [ ! -f $conf ] ; then
echo
echo "The config file $conf does not exist"
echo
# bugfix: this was a bare 'exit', which exits with status 0 (the status of the
# preceding echo) - a missing config file must signal failure to the caller
exit 2
fi
# ----------- MAIN ----------------------------------------------------------------------------------
#kul_echo " The script you are running has basename `basename "$0"`, located in dirname $kul_main_dir"
#kul_echo " The present working directory is `pwd`"
#echo "verbose_level: $verbose_level"
# ---------- SET MAIN DEFAULTS ---
# set mem_mb for mriqc/fmriprep
# convert GB -> MB via awk (also copes with non-integer mem_gb values)
gb=1024
mem_mb=$(echo $mem_gb $gb | awk '{print $1 * $2 }')
# freesurfer license (check if set as environment variable, if not set hard coded)
if [ -z $FS_LICENSE ]; then
kul_echo " freesurfer_license was not found; setting it hard to /KUL_apps/freesurfer/license.txt"
freesurfer_license=/KUL_apps/freesurfer/license.txt
else
freesurfer_license=$FS_LICENSE
kul_echo " freesurfer_license was set before (notably: $freesurfer_license)"
fi
# ---------- PROCESS CONTROL & LOAD BALANCING --------
# We will be running 4 preprocessings in parallel: mriqc, fmriprep, freesurfer & KUL_dwiprep
# We need to do some load balancing #FLAG, needs optimisation, a.o. if some processes finished already!
# each ncpu_* below is ncpu/load + 1, so every task always gets at least 1 core
# set number of cores for task mriqc
load_mriqc=37 # higher number means less cpu need (mriqc does not need much)
ncpu_mriqc=$(((($ncpu/$load_mriqc))+1))
ncpu_mriqc_ants=$(((($ncpu/$load_mriqc))+1))
# set number of cores for task fmriprep
load_fmriprep=5
ncpu_fmriprep=$(((($ncpu/$load_fmriprep))+1))
ncpu_fmriprep_ants=$(((($ncpu/$load_fmriprep))+1))
# set number of cores for task freesurfer
load_freesurfer=2
ncpu_freesurfer=$(((($ncpu/$load_freesurfer))+1))
# set number of cores for task KUL_dwiprep
load_dwiprep=2
ncpu_dwiprep=$(((($ncpu/$load_dwiprep))+1))
ncpu_synb0=1
# Ask if docker needs to be reset
if [ $docker_reset_flag -eq 1 ];then
# interactive: docker prompts for confirmation here (no -f flag given)
docker system prune -a
fi
# ----------- STEP 1 - Preprocess each subject with mriqc, fmriprep, freesurfer and KUL_dwiprep ---
if [ $expert -eq 1 ]; then
# Expert mode
kul_echo " Using Expert mode"
# check exit_after
exit_after=$(grep exit_after $conf | grep -v \# | sed 's/[^0-9]//g')
if [ -z "$exit_after" ]; then
exit_after=0
fi
kul_echo " exit_after: $exit_after"
#check make_pbs_files_instead_of_running
make_pbs_files_instead_of_running=$(grep make_pbs_files_instead_of_running $conf | grep -v \# | sed 's/[^0-9]//g')
if [ -z "$make_pbs_files_instead_of_running" ]; then
make_pbs_files_instead_of_running=0
fi
kul_echo " make_pbs_files_instead_of_running: $make_pbs_files_instead_of_running"
if [ $make_pbs_files_instead_of_running -eq 1 ]; then
pbs_cpu=$(grep pbs_cpu $conf | grep -v \# | sed 's/[^0-9]//g')
pbs_mem=$(grep pbs_mem $conf | grep -v \# | sed 's/[^0-9]//g')
pbs_lp=$(grep pbs_lp $conf | grep -v \# | cut -d':' -f 2 | tr -d '\r')
pbs_email=$(grep pbs_email $conf | grep -v \# | cut -d':' -f 2 | tr -d '\r')
pbs_walltime=$(grep pbs_walltime $conf | grep -v \# | cut -d':' -f 2- | tr -d '\r')
pbs_singularity_mriqc=$(grep pbs_singularity_mriqc $conf | grep -v \# | cut -d':' -f 2 | tr -d '\r')
pbs_singularity_fmriprep=$(grep pbs_singularity_fmriprep $conf | grep -v \# | cut -d':' -f 2 | tr -d '\r')
kul_echo " pbs_cpu: $pbs_cpu"
kul_echo " pbs_mem: $pbs_mem"
kul_echo " pbs_lp: $pbs_lp"
kul_echo " pbs_email: $pbs_email"
kul_echo " pbs_walltime: $pbs_walltime"
kul_echo " pbs_singularity_mriqc: ${pbs_singularity_mriqc}"
kul_echo " pbs_singularity_fmriprep: $pbs_singularity_fmriprep"
#mriqc_rand=$(cat /dev/urandom | env LC_CTYPE=C tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)
#pbs_data_file="pbs_data_mriqc_${mriqc_rand}.csv"
fi
#check mriqc and options
do_mriqc=$(grep do_mriqc $conf | grep -v \# | sed 's/[^0-9]//g')
if [ -z "$do_mriqc" ]; then
do_mriqc=0
fi
kul_echo " do_mriqc: $do_mriqc"
if [ $do_mriqc -eq 1 ]; then
mriqc_options=$(grep mriqc_options $conf | grep -v \# | cut -d':' -f 2 | tr -d '\r')
mriqc_ncpu=$(grep mriqc_ncpu $conf | grep -v \# | sed 's/[^0-9]//g' | tr -d '\r')
ncpu_mriqc=$mriqc_ncpu
ncpu_mriqc_ants=$mriqc_ncpu
mriqc_mem=$(grep mriqc_mem $conf | grep -v \# | sed 's/[^0-9]//g' | tr -d '\r')
mem_gb=$mriqc_mem
#get bids_participants
BIDS_subjects=($(grep BIDS_participants $conf | grep -v \# | cut -d':' -f 2 | tr -d '\r'))
n_subj=${#BIDS_subjects[@]}
mriqc_simultaneous=$(grep mriqc_simultaneous $conf | grep -v \# | sed 's/[^0-9]//g' | tr -d '\r')
if [ $make_pbs_files_instead_of_running -eq 1 ]; then
mriqc_simultaneous_pbs=$(($mriqc_simultaneous-1))
mriqc_simultaneous=1
else
mriqc_simultaneous_pbs=0
fi
kul_echo " mriqc_options: $mriqc_options"
kul_echo " mriqc_ncpu: $mriqc_ncpu"
kul_echo " mriqc_mem: $mriqc_mem"
kul_echo " BIDS_participants: ${BIDS_subjects[@]}"
kul_echo " number of BIDS_participants: $n_subj"
kul_echo " mriqc_simultaneous: $mriqc_simultaneous"
kul_echo " mriqc_simultaneous_pbs: $mriqc_simultaneous_pbs"
# check if already performed mriqc
todo_bids_participants=()
already_done=()
for i_bids_participant in $(seq 0 $(($n_subj-1))); do
mriqc_dir_to_check=mriqc/sub-${BIDS_subjects[$i_bids_participant]}
#echo $mriqc_dir_to_check
if [ ! -d $mriqc_dir_to_check ]; then
todo_bids_participants+=(${BIDS_subjects[$i_bids_participant]})
else
already_done+=(${BIDS_subjects[$i_bids_participant]})
fi
done
kul_echo " mriqc was already done for participant(s) ${already_done[@]}"
# submit the jobs (and split them in chucks)
n_subj_todo=${#todo_bids_participants[@]}
task_number=1
task_counter=1
for i_bids_participant in $(seq 0 $mriqc_simultaneous $(($n_subj_todo-1))); do
mriqc_participants=${todo_bids_participants[@]:$i_bids_participant:$mriqc_simultaneous}
#echo " going to start mriqc with $mriqc_simultaneous participants simultaneously, notably $mriqc_participants"