From 041a84f184a276eff602a6937cb10d92d9eedaaf Mon Sep 17 00:00:00 2001 From: Samuele Cancellieri <32717860+samuelecancellieri@users.noreply.github.com> Date: Tue, 9 Mar 2021 00:10:47 +0100 Subject: [PATCH] update the repo --- .crisprme_off_risk_card | 797 + .editorconfig | 0 .gitattributes | 4 + .gitignore | 10 + .vscode/settings.json | 5 + Dockerfile | 21 + LICENSE | 2 + OldScripts/annotation_samples.py | 250 + OldScripts/annotator_for_onlyvar.old.py | 946 + OldScripts/app_simplified.py | 770 + OldScripts/app_simplified_bootstrap.py | 1061 + OldScripts/app_tabs.py | 1040 + OldScripts/app_v4.py | 1430 + OldScripts/app_v5.py | 2675 + OldScripts/calc_samples.py | 174 + OldScripts/calc_samples_faster.py | 203 + OldScripts/cluster.dict.old.py | 142 + OldScripts/ext_seq_pam_creation.sh | 12 + OldScripts/extract_subcluster.awk | 6 + OldScripts/generate_report_samples.py | 780 + OldScripts/only_cluster.py | 83 + OldScripts/pam_analysis.py | 173 + OldScripts/pam_creation.py | 253 + OldScripts/reassign_sample_to_cluster.py | 40 + OldScripts/scores_guide_table.old.py | 300 + OldScripts/submit_job.sh | 300 + OldScripts/submit_job.test.sh | 339 + OldScripts/summary_by_guide_table.py | 102 + OldScripts/summary_position.py | 103 + OldScripts/tmp_top1_annotation.py | 54 + PostProcess/20130606_sample_info.xlsx | Bin 0 -> 990287 bytes PostProcess/CFDGraph.py | 105 + PostProcess/CRISPRme_plots.py | 167 + PostProcess/PAM_scores.pkl | 51 + PostProcess/__init__.py | 0 PostProcess/add_genome.py | 160 + PostProcess/add_risk_score.py | 23 + PostProcess/adjust_cols.py | 31 + PostProcess/aggiunta_genoma_test_script.sh | 25 + PostProcess/analisi_indels_NNN.py | 1069 + PostProcess/analisi_indels_NNN.sh | 136 + PostProcess/annotate_final_results.py | 55 + PostProcess/annotator.differences | 7 + PostProcess/annotator_cal_sample.py | 1302 + PostProcess/annotator_for_onlyvar.py | 1270 + PostProcess/assemble_cfd_graphs.py | 45 + PostProcess/automated_search.py | 221 + 
PostProcess/automated_search.sh | 273 + PostProcess/automated_search_good_parallel.sh | 294 + .../automated_search_good_parallel_v2.sh | 352 + .../azimuth/FC_plus_RES_withPredictions.csv | 5311 + PostProcess/azimuth/__init__.py | 0 .../azimuth/azure_models/V3_model_full.pickle | Bin 0 -> 134654 bytes .../azure_models/V3_model_nopos.pickle | Bin 0 -> 134654 bytes PostProcess/azimuth/cli_run_model.py | 54 + PostProcess/azimuth/cluster_job.py | 118 + PostProcess/azimuth/corrstats.py | 120 + .../data/FC_plus_RES_withPredictions.csv | 5311 + PostProcess/azimuth/data/V1_data.xlsx | Bin 0 -> 158503 bytes PostProcess/azimuth/data/V1_suppl_data.txt | 2145 + PostProcess/azimuth/data/V2_data.xlsx | Bin 0 -> 3786809 bytes PostProcess/azimuth/features/__init__.py | 0 PostProcess/azimuth/features/featurization.py | 557 + PostProcess/azimuth/features/microhomology.py | 102 + PostProcess/azimuth/load_data.py | 481 + PostProcess/azimuth/local_multiprocessing.py | 30 + PostProcess/azimuth/metrics.py | 656 + PostProcess/azimuth/model_comparison.py | 647 + PostProcess/azimuth/models/DNN.py | 74 + PostProcess/azimuth/models/GP.py | 114 + PostProcess/azimuth/models/__init__.py | 0 PostProcess/azimuth/models/baselines.py | 97 + PostProcess/azimuth/models/ensembles.py | 214 + PostProcess/azimuth/models/gpy_ssk.py | 56 + PostProcess/azimuth/models/regression.py | 287 + PostProcess/azimuth/models/ssk.py | 38 + PostProcess/azimuth/predict.py | 373 + .../azimuth/saved_models/V3_model_full.pickle | Bin 0 -> 127000 bytes .../saved_models/V3_model_nopos.pickle | Bin 0 -> 127128 bytes PostProcess/azimuth/tests/1000guides.csv | 948 + .../azimuth/tests/test_saved_models.py | 23 + PostProcess/azimuth/util.py | 1332 + PostProcess/bed_for_ref_seq.py | 19 + PostProcess/change_dict.py | 176 + PostProcess/cluster.dict.py | 379 + PostProcess/compact_ref_var.py | 49 + PostProcess/correct_positions_targets.py | 39 + PostProcess/create_dict.py | 21 + PostProcess/creazione_dizionari.py | 45 + 
PostProcess/creazione_dizionari_unzipped.py | 81 + PostProcess/creazione_dizionari_zipped.py | 80 + PostProcess/crisprme.py | 821 + PostProcess/db_creation.py | 168 + PostProcess/extract_top.py | 39 + PostProcess/extraction.sh | 45 + PostProcess/indel_extraction.py | 42 + PostProcess/indel_process.py | 23 + PostProcess/indel_to_matrix.py | 120 + PostProcess/indels_process.sh | 93 + PostProcess/integrate_logs.py | 50 + PostProcess/merge_alt_chr.sh | 31 + PostProcess/merge_close_targets.zip | Bin 0 -> 1836 bytes PostProcess/merge_close_targets_cfd.sh | 27 + PostProcess/mismatch_score.pkl | 1178 + PostProcess/new_simple_analysis.py | 509 + PostProcess/personal_cards.py | 146 + PostProcess/pool_indels.py | 67 + PostProcess/pool_index_indels.py | 32 + PostProcess/pool_post_analisi_indel.py | 42 + PostProcess/pool_post_analisi_snp.py | 41 + PostProcess/pool_search_indels.py | 65 + PostProcess/populations_distribution.py | 151 + PostProcess/position_for_indels.py | 35 + PostProcess/post_analisi_indel.sh | 40 + PostProcess/post_analisi_snp.sh | 38 + PostProcess/post_analysis_only.py | 237 + PostProcess/post_analysis_only.sh | 230 + PostProcess/post_process.sh | 34 + PostProcess/process_summaries.py | 162 + PostProcess/query_manager.py | 122 + PostProcess/radar_chart.py | 419 + PostProcess/radar_chart_OLD.py | 392 + PostProcess/readme.txt | 19 + PostProcess/remove_bad_indel_targets.py | 72 + PostProcess/remove_contiguous_samples_cfd.py | 142 + PostProcess/replace_indels.py | 53 + PostProcess/resultIntegrator.py | 353 + PostProcess/samples_1000genomeproject.txt | 3501 + PostProcess/scores_guide_table.py | 326 + PostProcess/scriptAnalisiNNN_v3.sh | 121 + PostProcess/search_only.py | 171 + PostProcess/search_only.sh | 226 + PostProcess/simpleAnalysis_v3.py | 1691 + PostProcess/submit_job.final.sh | 414 + PostProcess/submit_job_automated.sh | 456 + PostProcess/submit_job_automated_new.sh | 504 + PostProcess/summary_by_guide.py | 124 + PostProcess/summary_by_guide_position.py | 
224 + PostProcess/summary_by_samples.py | 153 + PostProcess/supportFunctions/__init__.py | 0 .../dictionary_creation_indels.py | 85 + .../supportFunctions/loadSample/__init__.py | 0 .../loadSample/associateSample.py | 102 + PostProcess/update_dict.py | 149 + PostProcess/vuoto.txt | 0 README.md | 55 + app.py | 51 + assets/37143442.png | Bin 0 -> 10871 bytes assets/Img/.keep | 1 + assets/bWLwgP.css | 414 + assets/byufwb.png | Bin 0 -> 40932 bytes assets/favicon.ico | Bin 0 -> 165662 bytes assets/flex_containers.css | 291 + assets/header.css | 10 + assets/helpPage/advOpt.PNG | Bin 0 -> 18237 bytes assets/helpPage/allowed.PNG | Bin 0 -> 8798 bytes assets/helpPage/crRNA.PNG | Bin 0 -> 13246 bytes assets/helpPage/email.PNG | Bin 0 -> 9336 bytes assets/helpPage/genome.png | Bin 0 -> 11092 bytes assets/helpPage/guides.PNG | Bin 0 -> 23431 bytes assets/helpPage/homeCRISPRme.PNG | Bin 0 -> 67779 bytes assets/helpPage/pam.png | Bin 0 -> 31192 bytes assets/helpPage/sequence.PNG | Bin 0 -> 23080 bytes assets/helpPage/submit.png | Bin 0 -> 565 bytes assets/helpPage/warning.png | Bin 0 -> 25968 bytes assets/loader.css | 25 + assets/main_page.png | Bin 0 -> 40932 bytes assets/overlay.css | 87 + assets/placeholder.png | Bin 0 -> 12282 bytes assets/resultPage/populationDistribution.png | Bin 0 -> 41966 bytes assets/resultPage/resultsSummary.png | Bin 0 -> 28984 bytes assets/resultPage/summaryByGraphic.PNG | Bin 0 -> 150545 bytes .../summaryByGraphic_population.PNG | Bin 0 -> 151079 bytes assets/resultPage/summaryByGraphic_sample.PNG | Bin 0 -> 153069 bytes assets/resultPage/summaryByGraphicaGecko.png | Bin 0 -> 86725 bytes assets/resultPage/summaryByGuide.png | Bin 0 -> 27708 bytes assets/resultPage/summaryByPosition.png | Bin 0 -> 25701 bytes assets/resultPage/summaryBySamples.png | Bin 0 -> 25177 bytes assets/typography.css | 54 + assets/waitPage/jobDone.png | Bin 0 -> 5395 bytes assets/waitPage/loadPage.png | Bin 0 -> 14333 bytes crisprme.py | 878 + docsCrisprme/crisprme_off.html 
| 14047 ++ docsCrisprme/fileCreated.tsv | 42 + environment/environment_droplet.yml | 137 + environment/environment_for_python_3_8.yml | 33 + guides/1000fake.txt | 1000 + guides/1000guides.txt | 1000 + guides/100fake.txt | 100 + guides/100guides.txt | 100 + guides/10guides.txt | 10 + guides/1guides.txt | 1 + guides/EMX1.txt | 1 + guides/PNAS_guide.xlsx | Bin 0 -> 708623 bytes guides/anne_guides.txt | 4 + guides/ccr5.guide.txt | 124 + guides/ccr5.reference.extended_profile.xls | 0 guides/ccr5.reference.profile.xls | 0 guides/ccr5.reference.targets.txt | 0 guides/cpf1.txt | 1 + guides/gecko.guide.txt | 111671 +++++++++++++++ guides/guides_for_NRG.txt | 2 + guides/p53.guide.txt | 288 + guides/pcsk9.guide.txt | 516 + guides/sg1617.txt | 1 + guides/single_anne_guide.txt | 1 + guides/therapeutic_gRNA.txt | 2544 + guides/vcfguide.txt | 1 + guides/viral_gRNA.txt | 612 + index.py | 92 + pages/ChooseFiles.py | 601 + pages/GUImessage.py | 42 + pages/UpdateDict.py | 576 + pages/__init__.py | 0 pages/annotations.py | 138 + pages/change_dict.py | 176 + pages/contacts_page.py | 56 + pages/creazione_dizionari.py | 52 + pages/genome_database.py | 188 + pages/help_page.py | 297 + pages/history_page.py | 369 + pages/load_page.py | 315 + pages/main_page.py | 1240 + pages/navbar_creation.py | 66 + pages/personalization_page.py | 405 + pages/results_page.py | 3774 + pages/send_mail.py | 57 + pam/20bp-NAA-spCas9.txt | 1 + pam/20bp-NGA-spCas9.txt | 1 + pam/20bp-NGG-spCas9.txt | 1 + pam/20bp-NGK-spCas9.txt | 1 + pam/20bp-NNGT-spCas9.txt | 1 + pam/20bp-NNN-spCas9.txt | 1 + pam/20bp-NRG-spCas9.txt | 1 + pam/20bp-TTCN-CasX.txt | 1 + pam/NGTN-23bp-ShCASTAcCAST.txt | 1 + pam/TTTV-21bp-Cas12a(Cpf1).txt | 1 + pam/TTTV-23bp-Cas12a(Cpf1).txt | 1 + .../samples_VCFs_1000_genome_project.txt | 3501 + samplesID/hg38_HGDP/HGDP.samplesID.txt | 930 + .../samples_VCFs_1000_genome_project.txt | 3501 + seq_script/__init__.py | 0 seq_script/convert_pam.py | 79 + seq_script/extract_seq.py | 31 + 244 files 
changed, 202522 insertions(+) create mode 100644 .crisprme_off_risk_card create mode 100644 .editorconfig create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 .vscode/settings.json create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 OldScripts/annotation_samples.py create mode 100644 OldScripts/annotator_for_onlyvar.old.py create mode 100644 OldScripts/app_simplified.py create mode 100644 OldScripts/app_simplified_bootstrap.py create mode 100644 OldScripts/app_tabs.py create mode 100644 OldScripts/app_v4.py create mode 100644 OldScripts/app_v5.py create mode 100644 OldScripts/calc_samples.py create mode 100644 OldScripts/calc_samples_faster.py create mode 100644 OldScripts/cluster.dict.old.py create mode 100644 OldScripts/ext_seq_pam_creation.sh create mode 100644 OldScripts/extract_subcluster.awk create mode 100644 OldScripts/generate_report_samples.py create mode 100644 OldScripts/only_cluster.py create mode 100644 OldScripts/pam_analysis.py create mode 100644 OldScripts/pam_creation.py create mode 100644 OldScripts/reassign_sample_to_cluster.py create mode 100644 OldScripts/scores_guide_table.old.py create mode 100644 OldScripts/submit_job.sh create mode 100644 OldScripts/submit_job.test.sh create mode 100644 OldScripts/summary_by_guide_table.py create mode 100644 OldScripts/summary_position.py create mode 100644 OldScripts/tmp_top1_annotation.py create mode 100644 PostProcess/20130606_sample_info.xlsx create mode 100644 PostProcess/CFDGraph.py create mode 100644 PostProcess/CRISPRme_plots.py create mode 100644 PostProcess/PAM_scores.pkl create mode 100644 PostProcess/__init__.py create mode 100644 PostProcess/add_genome.py create mode 100644 PostProcess/add_risk_score.py create mode 100644 PostProcess/adjust_cols.py create mode 100644 PostProcess/aggiunta_genoma_test_script.sh create mode 100644 PostProcess/analisi_indels_NNN.py create mode 100644 PostProcess/analisi_indels_NNN.sh create mode 100644 
PostProcess/annotate_final_results.py create mode 100644 PostProcess/annotator.differences create mode 100644 PostProcess/annotator_cal_sample.py create mode 100644 PostProcess/annotator_for_onlyvar.py create mode 100644 PostProcess/assemble_cfd_graphs.py create mode 100644 PostProcess/automated_search.py create mode 100644 PostProcess/automated_search.sh create mode 100644 PostProcess/automated_search_good_parallel.sh create mode 100644 PostProcess/automated_search_good_parallel_v2.sh create mode 100644 PostProcess/azimuth/FC_plus_RES_withPredictions.csv create mode 100644 PostProcess/azimuth/__init__.py create mode 100644 PostProcess/azimuth/azure_models/V3_model_full.pickle create mode 100644 PostProcess/azimuth/azure_models/V3_model_nopos.pickle create mode 100644 PostProcess/azimuth/cli_run_model.py create mode 100644 PostProcess/azimuth/cluster_job.py create mode 100644 PostProcess/azimuth/corrstats.py create mode 100644 PostProcess/azimuth/data/FC_plus_RES_withPredictions.csv create mode 100644 PostProcess/azimuth/data/V1_data.xlsx create mode 100644 PostProcess/azimuth/data/V1_suppl_data.txt create mode 100644 PostProcess/azimuth/data/V2_data.xlsx create mode 100644 PostProcess/azimuth/features/__init__.py create mode 100644 PostProcess/azimuth/features/featurization.py create mode 100644 PostProcess/azimuth/features/microhomology.py create mode 100644 PostProcess/azimuth/load_data.py create mode 100644 PostProcess/azimuth/local_multiprocessing.py create mode 100644 PostProcess/azimuth/metrics.py create mode 100644 PostProcess/azimuth/model_comparison.py create mode 100644 PostProcess/azimuth/models/DNN.py create mode 100644 PostProcess/azimuth/models/GP.py create mode 100644 PostProcess/azimuth/models/__init__.py create mode 100644 PostProcess/azimuth/models/baselines.py create mode 100644 PostProcess/azimuth/models/ensembles.py create mode 100644 PostProcess/azimuth/models/gpy_ssk.py create mode 100644 PostProcess/azimuth/models/regression.py create mode 
100644 PostProcess/azimuth/models/ssk.py create mode 100644 PostProcess/azimuth/predict.py create mode 100644 PostProcess/azimuth/saved_models/V3_model_full.pickle create mode 100644 PostProcess/azimuth/saved_models/V3_model_nopos.pickle create mode 100644 PostProcess/azimuth/tests/1000guides.csv create mode 100644 PostProcess/azimuth/tests/test_saved_models.py create mode 100644 PostProcess/azimuth/util.py create mode 100644 PostProcess/bed_for_ref_seq.py create mode 100644 PostProcess/change_dict.py create mode 100644 PostProcess/cluster.dict.py create mode 100644 PostProcess/compact_ref_var.py create mode 100644 PostProcess/correct_positions_targets.py create mode 100644 PostProcess/create_dict.py create mode 100644 PostProcess/creazione_dizionari.py create mode 100644 PostProcess/creazione_dizionari_unzipped.py create mode 100644 PostProcess/creazione_dizionari_zipped.py create mode 100644 PostProcess/crisprme.py create mode 100644 PostProcess/db_creation.py create mode 100644 PostProcess/extract_top.py create mode 100644 PostProcess/extraction.sh create mode 100644 PostProcess/indel_extraction.py create mode 100644 PostProcess/indel_process.py create mode 100644 PostProcess/indel_to_matrix.py create mode 100644 PostProcess/indels_process.sh create mode 100644 PostProcess/integrate_logs.py create mode 100644 PostProcess/merge_alt_chr.sh create mode 100644 PostProcess/merge_close_targets.zip create mode 100644 PostProcess/merge_close_targets_cfd.sh create mode 100644 PostProcess/mismatch_score.pkl create mode 100644 PostProcess/new_simple_analysis.py create mode 100644 PostProcess/personal_cards.py create mode 100644 PostProcess/pool_indels.py create mode 100644 PostProcess/pool_index_indels.py create mode 100644 PostProcess/pool_post_analisi_indel.py create mode 100644 PostProcess/pool_post_analisi_snp.py create mode 100644 PostProcess/pool_search_indels.py create mode 100644 PostProcess/populations_distribution.py create mode 100644 
PostProcess/position_for_indels.py create mode 100644 PostProcess/post_analisi_indel.sh create mode 100644 PostProcess/post_analisi_snp.sh create mode 100644 PostProcess/post_analysis_only.py create mode 100644 PostProcess/post_analysis_only.sh create mode 100644 PostProcess/post_process.sh create mode 100644 PostProcess/process_summaries.py create mode 100644 PostProcess/query_manager.py create mode 100644 PostProcess/radar_chart.py create mode 100644 PostProcess/radar_chart_OLD.py create mode 100644 PostProcess/readme.txt create mode 100644 PostProcess/remove_bad_indel_targets.py create mode 100644 PostProcess/remove_contiguous_samples_cfd.py create mode 100644 PostProcess/replace_indels.py create mode 100644 PostProcess/resultIntegrator.py create mode 100644 PostProcess/samples_1000genomeproject.txt create mode 100644 PostProcess/scores_guide_table.py create mode 100644 PostProcess/scriptAnalisiNNN_v3.sh create mode 100644 PostProcess/search_only.py create mode 100644 PostProcess/search_only.sh create mode 100644 PostProcess/simpleAnalysis_v3.py create mode 100644 PostProcess/submit_job.final.sh create mode 100644 PostProcess/submit_job_automated.sh create mode 100644 PostProcess/submit_job_automated_new.sh create mode 100644 PostProcess/summary_by_guide.py create mode 100644 PostProcess/summary_by_guide_position.py create mode 100644 PostProcess/summary_by_samples.py create mode 100644 PostProcess/supportFunctions/__init__.py create mode 100644 PostProcess/supportFunctions/dictionaryIndel/dictionary_creation_indels.py create mode 100644 PostProcess/supportFunctions/loadSample/__init__.py create mode 100644 PostProcess/supportFunctions/loadSample/associateSample.py create mode 100644 PostProcess/update_dict.py create mode 100644 PostProcess/vuoto.txt create mode 100644 README.md create mode 100644 app.py create mode 100644 assets/37143442.png create mode 100644 assets/Img/.keep create mode 100644 assets/bWLwgP.css create mode 100644 assets/byufwb.png create mode 
100644 assets/favicon.ico create mode 100644 assets/flex_containers.css create mode 100644 assets/header.css create mode 100644 assets/helpPage/advOpt.PNG create mode 100644 assets/helpPage/allowed.PNG create mode 100644 assets/helpPage/crRNA.PNG create mode 100644 assets/helpPage/email.PNG create mode 100644 assets/helpPage/genome.png create mode 100644 assets/helpPage/guides.PNG create mode 100644 assets/helpPage/homeCRISPRme.PNG create mode 100644 assets/helpPage/pam.png create mode 100644 assets/helpPage/sequence.PNG create mode 100644 assets/helpPage/submit.png create mode 100644 assets/helpPage/warning.png create mode 100644 assets/loader.css create mode 100644 assets/main_page.png create mode 100644 assets/overlay.css create mode 100644 assets/placeholder.png create mode 100644 assets/resultPage/populationDistribution.png create mode 100644 assets/resultPage/resultsSummary.png create mode 100644 assets/resultPage/summaryByGraphic.PNG create mode 100644 assets/resultPage/summaryByGraphic_population.PNG create mode 100644 assets/resultPage/summaryByGraphic_sample.PNG create mode 100644 assets/resultPage/summaryByGraphicaGecko.png create mode 100644 assets/resultPage/summaryByGuide.png create mode 100644 assets/resultPage/summaryByPosition.png create mode 100644 assets/resultPage/summaryBySamples.png create mode 100644 assets/typography.css create mode 100644 assets/waitPage/jobDone.png create mode 100644 assets/waitPage/loadPage.png create mode 100644 crisprme.py create mode 100644 docsCrisprme/crisprme_off.html create mode 100644 docsCrisprme/fileCreated.tsv create mode 100644 environment/environment_droplet.yml create mode 100644 environment/environment_for_python_3_8.yml create mode 100644 guides/1000fake.txt create mode 100644 guides/1000guides.txt create mode 100644 guides/100fake.txt create mode 100644 guides/100guides.txt create mode 100644 guides/10guides.txt create mode 100644 guides/1guides.txt create mode 100644 guides/EMX1.txt create mode 100644 
guides/PNAS_guide.xlsx create mode 100644 guides/anne_guides.txt create mode 100644 guides/ccr5.guide.txt create mode 100644 guides/ccr5.reference.extended_profile.xls create mode 100644 guides/ccr5.reference.profile.xls create mode 100644 guides/ccr5.reference.targets.txt create mode 100644 guides/cpf1.txt create mode 100644 guides/gecko.guide.txt create mode 100644 guides/guides_for_NRG.txt create mode 100644 guides/p53.guide.txt create mode 100644 guides/pcsk9.guide.txt create mode 100644 guides/sg1617.txt create mode 100644 guides/single_anne_guide.txt create mode 100644 guides/therapeutic_gRNA.txt create mode 100644 guides/vcfguide.txt create mode 100644 guides/viral_gRNA.txt create mode 100644 index.py create mode 100644 pages/ChooseFiles.py create mode 100644 pages/GUImessage.py create mode 100644 pages/UpdateDict.py create mode 100644 pages/__init__.py create mode 100644 pages/annotations.py create mode 100644 pages/change_dict.py create mode 100644 pages/contacts_page.py create mode 100644 pages/creazione_dizionari.py create mode 100644 pages/genome_database.py create mode 100644 pages/help_page.py create mode 100644 pages/history_page.py create mode 100644 pages/load_page.py create mode 100644 pages/main_page.py create mode 100644 pages/navbar_creation.py create mode 100644 pages/personalization_page.py create mode 100644 pages/results_page.py create mode 100644 pages/send_mail.py create mode 100644 pam/20bp-NAA-spCas9.txt create mode 100644 pam/20bp-NGA-spCas9.txt create mode 100644 pam/20bp-NGG-spCas9.txt create mode 100644 pam/20bp-NGK-spCas9.txt create mode 100644 pam/20bp-NNGT-spCas9.txt create mode 100644 pam/20bp-NNN-spCas9.txt create mode 100644 pam/20bp-NRG-spCas9.txt create mode 100644 pam/20bp-TTCN-CasX.txt create mode 100644 pam/NGTN-23bp-ShCASTAcCAST.txt create mode 100644 pam/TTTV-21bp-Cas12a(Cpf1).txt create mode 100644 pam/TTTV-23bp-Cas12a(Cpf1).txt create mode 100644 samplesID/1000G/samples_VCFs_1000_genome_project.txt create mode 100644 
samplesID/hg38_HGDP/HGDP.samplesID.txt create mode 100644 samplesID/hg38_ref_VCF_a_caso/samples_VCFs_1000_genome_project.txt create mode 100644 seq_script/__init__.py create mode 100644 seq_script/convert_pam.py create mode 100644 seq_script/extract_seq.py diff --git a/.crisprme_off_risk_card b/.crisprme_off_risk_card new file mode 100644 index 0000000..e3bc322 --- /dev/null +++ b/.crisprme_off_risk_card @@ -0,0 +1,797 @@ +''' +CRISPRme performs predictive analysis and result assessment on population and individual specific CRISPR/Cas experiments. +CRISPRme enumerates on- and off-target accounting simultaneously for substitutions, DNA/RNA bulges and common genetic variants from the 1000 +genomes project. CRISPRme is based on CRISPRitz (Cancellieri, Samuele, et al. "Crispritz: rapid, high-throughput, and variant-aware in silico off-target site identification for crispr genome editing." Bioinformatics (2019).) +a software tool for population target analyses. CRISPRme is devoted to individual specific on- and off-target analyses. 
+ +Documentation updated up to 07/07/2020 + +Launch crisprme: + ++ python crisprme_off.py ++ gunicorn -b :8080 crisprme_off:app.server + +Generate docs: + ++ pdoc crisprme_off.py -o docsCrisprme/ --force --html +''' +# IMPORT FROM ADDITIONAL PAGES AND GUI +from GUI import UpdateDict +from GUI import navbar_creation +from GUI import GUImessage +from GUI import get_genomes +from GUI import creazione_dizionari +from GUI import ChooseFiles +from GUI import annotations +from additional_pages import send_mail +from additional_pages import results_page +from additional_pages import load_page +# from additional_pages import history_page +from additional_pages import help_page +# from additional_pages import get_results +from additional_pages import get_genomes +from additional_pages import genomes_page +from additional_pages import contacts_page +from app import app +from app import URL +from index import cache +from os.path import isfile, isdir, join # for getting directories +import webbrowser as wb # Open CRISPRme on browser +import concurrent.futures # For workers and queue +import re # For sort chr filter values +from datetime import datetime # For time when job submitted +import collections # For check if guides are the same in two results +import filecmp # check if Params files are equals +import sys # for sys.exit() +import random # for job id +import string # for job id +# from flask_caching import Cache # for cache of .targets or .scores + +import time # measure time for loading df table +import json # for getting and saving report images list +import pandas as pd # for dash table +import io # for decoding upload content +import base64 # for decoding upload content +from os import listdir # for getting directories +from tkinter import filedialog, END, IntVar, messagebox +from PostProcess import CFDGraph +from PostProcess.supportFunctions.loadSample import associateSample +import math +from seq_script import extract_seq, convert_pam + +import os +from os import 
getcwd +import subprocess +from dash.exceptions import PreventUpdate +# IMPORT DASH +import dash_table +import dash_daq as daq +import dash_html_components as html +import dash_core_components as dcc +import dash_bootstrap_components as dbc +from dash.dependencies import Input, Output, State +import dash +import matplotlib +matplotlib.use('Agg') +try: + from GUI import GUImessage as Gmsg +except ImportError: + pass + +try: + import Tkinter as tk +except ImportError: + import tkinter as tk + +try: + import ttk + py3 = False +except ImportError: + import tkinter.ttk as ttk + py3 = True + +# Warning symbol \u26A0 +app_location = os.path.realpath(__file__) +print(app_location) +app_main_directory = os.path.dirname(app_location) + '/' # This for scripts +current_working_directory = os.getcwd() + '/' # This for files + +exeggutor = concurrent.futures.ProcessPoolExecutor(max_workers=2) + + +if ONLINE: + DISPLAY_OFFLINE = 'none' + DISPLAY_ONLINE = '' +else: + DISPLAY_OFFLINE = '' + DISPLAY_ONLINE = 'none' + + +# Populations 1000 gen proj +# population_1000gp = { +# 'EAS':['CHB', 'JPT', 'CHS', 'CDX', 'KHV'], +# 'EUR':['CEU', 'TSI', 'FIN', 'GBR', 'IBS'], +# 'AFR':['YRI', 'LWK', 'GWD', 'MSL', 'ESN', 'ASW', 'ACB'], +# 'AMR':['MXL', 'PUR', 'CLM', 'PEL'], +# 'SAS':['GIH', 'PJL', 'BEB', 'STU', 'ITU'] +# } +# dict_pop_to_superpop = {'CHB':'EAS', 'JPT':'EAS', 'CHS':'EAS', 'CDX':'EAS', 'KHV':'EAS', +# 'CEU':'EUR', 'TSI':'EUR', 'FIN':'EUR', 'GBR':'EUR', 'IBS':'EUR', +# 'YRI':'AFR', 'LWK':'AFR', 'GWD':'AFR', 'MSL':'AFR', 'ESN':'AFR', 'ASW':'AFR', 'ACB':'AFR', +# 'MXL':'AMR', 'PUR':'AMR', 'CLM':'AMR', 'PEL':'AMR', +# 'GIH':'SAS', 'PJL':'SAS', 'BEB':'SAS', 'STU':'SAS', 'ITU':'SAS' +# } +# List of all samples +# pop_file = pd.read_excel(os.path.dirname(os.path.realpath(__file__)) + '/PostProcess/20130606_sample_info.xlsx') +# all_samples = pop_file.Sample.to_list() +# all_pop = pop_file.Population.to_list() +# dict_pop = dict() +# dict_sample_to_pop = dict() +# for pos, i in 
enumerate(all_pop): +# try: +# dict_pop[i].append(all_samples[pos]) +# except: +# dict_pop[i] = [all_samples[pos]] + +# dict_sample_to_pop[all_samples[pos]] = i +# dropdown_all_samples = [{'label': sam, 'value' : sam} for sam in all_samples] +# Dropdown available genomes + + +# Dropdown available PAM + + +# "https://images.plot.ly/logo/new-branding/plotly-logomark.png" + + +# Test page, go to /test-page to see + +# test_page = html.Div(final_list, style = {'margin':'1%'}) + + +# TEST PAGE 2 +# final_list = [] + +# test_page2 = html.Div(final_list, style={'margin': '1%'}) + +# # TEST page3 for new result page +# final_list = [] + +# test_page3 = html.Div(final_list, style={'margin': '1%'}) + +# # ABOUT PAGE + +# #about_page = html.Div(help_page.helpPage(), style = {'margin':'1%'}) +# # Contacts page +# final_list = [] +# contacts_page = html.Div(contacts_page.contactPage(), style={'margin': '1%'}) + + +##################################################CALLBACKS################################################## +# Test callbacks + + +# General function for selecting a Directory or a file +# def openDialog(n, type_ask, start_dir='./'): +# if n is None: +# raise PreventUpdate +# root = tk.Tk() +# root.withdraw() +# if type_ask == 'D': +# selected = filedialog.askdirectory( +# initialdir=current_working_directory + start_dir) +# else: +# selected = filedialog.askopenfilename( +# initialdir=current_working_directory + start_dir) +# root.destroy() +# if selected == '' or selected == '()': +# selected = 'None' +# return str(selected) + +# Callbacks for generating filebrowser for addition of new genome + + +# @app.callback( +# [Output('selected-referencegenome', 'children'), +# Output('full-path-refgen', 'children')], +# [Input('button-select-refgenome', 'n_clicks')] +# ) +# def fileDialogRefGenome(n): +# dir_opened = openDialog(n, 'D', 'Genomes') +# if len(dir_opened) > 0: +# return 'Selected: ' + dir_opened.split('/')[-1], dir_opened +# else: +# return 'Selected: None', 
'None' + + +# @app.callback( +# [Output('selected-pamfile', 'children'), +# Output('full-path-pam', 'children')], +# [Input('button-select-pam', 'n_clicks')] +# ) +# def fileDialogPam(n): +# file_opened = openDialog(n, 'F') +# if len(file_opened) > 0 and file_opened != "()": +# return 'Selected: ' + file_opened.split('/')[-1], file_opened +# else: +# return 'Selected: None', 'None' + + +# @app.callback( +# [Output('selected-annotationfile', 'children'), +# Output('full-path-annotation', 'children')], +# [Input('button-select-annotation', 'n_clicks')] +# ) +# def fileDialogAnnotation(n): +# file_opened = openDialog(n, 'F', 'annotations') +# if len(file_opened) > 0 and file_opened != "()": +# return 'Selected: ' + file_opened.split('/')[-1], file_opened +# else: +# return 'Selected: None', 'None' + + +# @app.callback( +# [Output('selected-vcf', 'children'), +# Output('full-path-vcf', 'children')], +# [Input('button-select-vcf', 'n_clicks')] +# ) +# def fileDialogVCF(n): +# dir_opened = openDialog(n, 'D') +# if len(dir_opened) > 0: +# return 'Selected: ' + dir_opened.split('/')[-1], dir_opened +# else: +# return 'Selected: None', 'None' + + +# @app.callback( +# [Output('selected-vcf-dict', 'children'), +# Output('full-path-vcf-dict', 'children')], +# [Input('button-select-vcf-dict', 'n_clicks')] +# ) +# def fileDialogVCF_dict(n): +# dir_opened = openDialog(n, 'D') +# if len(dir_opened) > 0: +# return 'Selected: ' + dir_opened.split('/')[-1], dir_opened +# else: +# return 'Selected: None', 'None' + + +# @app.callback( +# [Output('selected-sampleIDfile', 'children'), +# Output('full-path-samples', 'children')], +# [Input('button-select-sampleID', 'n_clicks')] +# ) +# def fileDialogSamplesID(n): +# file_opened = openDialog(n, 'F', 'samplesID') +# if len(file_opened) > 0 and file_opened != "()": +# return 'Selected: ' + file_opened.split('/')[-1], file_opened +# else: +# return 'Selected: None', 'None' + + +# @app.callback( +# [Output('selected-samples-dict', 'children'), 
+# Output('full-path-samples-dict', 'children')], +# [Input('button-select-samples-dict', 'n_clicks')] +# ) +# def fileDialogSamplesID_dict(n): +# file_opened = openDialog(n, 'F', 'samplesID') +# if len(file_opened) > 0 and file_opened != "()": +# return 'Selected: ' + file_opened.split('/')[-1], file_opened +# else: +# return 'Selected: None', 'None' + + +# @app.callback( +# [Output('selected-dictionary', 'children'), +# Output('full-path-dictionary', 'children')], +# [Input('button-select-dictionary', 'n_clicks')] +# ) +# def fileDialogDict(n): +# dir_opened = openDialog(n, 'D', 'dictionaries') +# if len(dir_opened) > 0 and dir_opened != "()": +# return 'Selected: ' + dir_opened.split('/')[-1], dir_opened +# else: +# return 'Selected: None', 'None' + + +# Check input presence for updating dictionaries + + +# Callback creazione nuovo genoma + + +# Callback per aggiornare la barra progresso + + +# # Insert/Delete example input + + +# @app.callback( +# [Output('available-genome', 'value'), +# Output('available-pam', 'value'), +# Output('text-guides', 'value'), +# Output('mms', 'value'), +# Output('dna', 'value'), +# Output('rna', 'value'), +# Output('len-guide-sequence-ver', 'value'), +# Output('text-sequence', 'value')], +# [Input('example-parameters', 'n_clicks_timestamp'), +# Input('remove-parameters', 'n_clicks_timestamp')] +# ) +# def inExample(nI, nR): +# ''' +# Inserts an example value in all fields to show the user an example result, or reset the input fields. NOT available in OFFLINE version. 
+ +# ***Args*** + +# + [**nI**] **example-parameters** (*n_clicks_timestamp*): button that inserts the example values +# + [**nR**] **remove-parameters** (*n_clicks_timestamp*): button that removes the values and resets all the fields + +# ***Returns*** + +# + **available-genome** (*value*): 'hg38 ref+hg38 1000genomeproject' as input example, or '' to reset the value +# + **available-pam** (*value*): '20bp-NGG-SpCas9' as input example, or '' to reset the value +# + **text-guides** (*value*): 'GAGTCCGAGCAGAAGAAGAA\\nCCATCGGTGGCCGTTTGCCC' as input example, or '' to reset the value +# + **mms** (*value*): '4' as input example, or '' to reset the value +# + **dna** (*value*): '1' as input example, or '' to reset the value +# + **rna** (*value*): '1' as input example, or '' to reset the value +# + **len-guide-sequence-ver** (*value*): '20' as input example, or '' to reset the value +# + **text-sequence** (*value*): '>sequence\\nTACCCCAAACGCGGAGGCGCCTCGGGAAGGCGAGGTGGGCAAGTTCAATGCCAAGCGTGACGGGGGA' as input example, +# or '' to reset the value +# ''' + +# if (nI is None) and (nR is None): +# raise PreventUpdate + +# if nI is None: +# nI = 0 + +# if nR is None: +# nR = 0 + +# if nI > 0: +# if nI > nR: +# return 'hg38 ref+hg38 1000genomeproject', '20bp-NGG-SpCas9', 'GAGTCCGAGCAGAAGAAGAA\nCCATCGGTGGCCGTTTGCCC', '4', '1', '1', '20', '>sequence\nTACCCCAAACGCGGAGGCGCCTCGGGAAGGCGAGGTGGGCAAGTTCAATGCCAAGCGTGACGGGGGA' + +# if nR > 0: +# if nR > nI: +# return '', '', '', '', '', '', '', '' + +# If selected genome has a '+', update advanced options comparison with reference + + +# @app.callback( +# Output('checkbox-ref-comp', 'checked'), +# [Input('available-genome', 'value')] +# ) +# def suggestComparison(value): +# ''' +# Update to Checked the Option 'Compare your results with the corresponding reference genome' if an Enriched Genome is selected ('+' is in the +# name of the genome) + +# ***Args*** + +# + [**value**] **available-genome** (*value*): the name of the genome selected by 
the user in the Dropdown + +# ***Returns*** + +# + **checkbox-ref-comp** [*checked*]: True if an Enriched genome is selected +# ''' +# if value is None: +# raise PreventUpdate +# if '+' in value: +# return True +# raise PreventUpdate + + +# Fade in guide len dropdown for sequence tabs version + + +# @app.callback( +# Output('fade-len-guide', 'is_in'), +# [Input('tabs', 'active_tab')], +# [State('fade-len-guide', 'is_in')] +# ) +# def resetTab(current_tab, is_in): +# ''' +# Manages the fading of the Dropdown for the guide length when the tab 'Sequence' is active. + +# ***Args*** + +# + [**current_tab**] **tabs** (*active_tab*): string of the ID of the current active tab +# + [**is_in**] **fade-len-guide** (*is_in*): True if the Dropdown guide length element is displayed, False otherwise + +# ***Returns*** + +# + **fade-len-guide** (*is_in*): True in order to show the Dropdown guide length element, False to hide it +# ''' +# if current_tab is None: +# raise PreventUpdate + +# if current_tab == 'guide-tab': +# return False +# return True + + +''' +@app.callback( + [Output('page-content', 'children'), + Output('job-link', 'children')], + [Input('url', 'href')], + [State('url','pathname'), + State('url','search'), + State('url','hash')] + # [Input('url', 'href')], + # [State('url','pathname'), + # State('url','search'),State('url','hash')] +) +''' +# When url changed, load new page + + +# #Callback to populate the tab, note that it's called when the result_page is loaded (dash implementation), so we do not use raise update to block this first callback +# @app.callback( +# [Output('signal','children'), +# Output('result-table','page_current'), +# Output('result-table', "sort_by"), +# Output('result-table','filter_query')], +# [Input('url', 'pathname')], +# [State('url', 'search')] +# ) +# def populateTable(pathname, search): +# print('pathname', pathname) +# if pathname != '/result': +# raise PreventUpdate + +# job_id = search.split('=')[-1] +# job_directory = 
current_working_directory + 'Results/' + job_id + '/' +# print('job dir', job_directory) +# if(not isdir(job_directory)): +# return 'not_exists', 0, [], '' +# #global_store(job_id) +# print('ok') +# return job_id, 0, [], '' + +# Send the data when next or prev button is clicked on the result table + + +''' +, + tooltip_data=[ + { + column: {'value': str(value), 'type': 'markdown'} + for column, value in row.items() + } for row in ans.to_dict('records') + ], + tooltip_duration=None + ) +''' + + +# def callback_img(): +# if (check_existance_sample): +# print("Todo") +# else: +# warning_no_res = dbc.Alert( +# "No results were found with the given parameters", color="warning") + + +# def generate_table_position(dataframe, id_table, page, guide = '', job_id = '', max_rows = 10): #NOTE v1 della tabella posizioni +# ''' +# Per generare una html table. NOTE è diversa da una dash dataTable +# ''' +# rows_remaining = len(dataframe) - (page - 1) * max_rows + +# return html.Table( +# # Header +# [html.Tr([html.Th(col) for col in dataframe.columns]) ] + +# # Body +# [html.Tr([ +# html.Td(html.A(dataframe.iloc[i + (page - 1)*max_rows][col], href = 'result?job=' + job_id + '#' + guide +'-Pos-' + str(dataframe.iloc[i + (page - 1)*max_rows]['Chromosome']) + '-' + str(dataframe.iloc[i + (page - 1)*max_rows]['Position']) , target = '_blank' )) if col == '' else html.Td(dataframe.iloc[i + (page - 1)*max_rows][col]) for col in dataframe.columns +# ]) for i in range(min(rows_remaining, max_rows))], +# style = {'display':'inline-block'}, +# id = id_table +# ) + + +# NOTE v2 della tabella posizioni #TODO modifica layout righe per allinearle + + +''' +#Callback to filter chr from Summary by Position table, and to show next/prev page +@app.callback( + [Output('div-table-position', 'children'), + Output('div-current-page-table-position', 'children')], + [Input('prev-page-position','n_clicks_timestamp'), + Input('next-page-position', 'n_clicks_timestamp'), + Input('div-position-filter-query', 
'children')], + [State('button-filter-position', 'n_clicks_timestamp'), + State('url', 'search'), + State('general-profile-table', 'selected_cells'), + State('general-profile-table', 'data'), + State('div-current-page-table-position', 'children'), + State('div-mms-bulges-position', 'children')] +) +def filterPositionTable(nPrev, nNext, filter_q, n, search, sel_cel, all_guides, current_page, mms_bulge): + if sel_cel is None: + raise PreventUpdate + if nPrev is None and nNext is None and n is None: + raise PreventUpdate + + if nPrev is None: + nPrev = 0 + if nNext is None: + nNext = 0 + if n is None: + n = 0 + + filter_q = filter_q.split(',') + chr = filter_q[0] + if chr == 'None': + chr = None + pos_begin = filter_q[1] + if pos_begin == 'None': + pos_begin = None + pos_end = filter_q[2] + if pos_end == 'None': + pos_end = None + + current_page = current_page.split('/')[0] + current_page = int(current_page) + mms = int(mms_bulge.split('-')[0]) + max_bulges = int(mms_bulge.split('-')[1]) + btn_position_section = [] + btn_position_section.append(n) + btn_position_section.append(nPrev) + btn_position_section.append(nNext) + job_id = search.split('=')[-1] + job_directory = current_working_directory + 'Results/' + job_id + '/' + guide = all_guides[int(sel_cel[0]['row'])]['Guide'] + if max(btn_position_section) == n: #Last button pressed is filtering, return the first page of the filtered table + if pos_begin is None or pos_begin == '': + pos_begin = 0 + if pos_end == '': + pos_end = None + if pos_end: + if int(pos_end) < int(pos_begin): + pos_end = None + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' 
+ guide +'.txt', sep = '\t') + + df.rename(columns = {'#Chromosome':'Chromosome'}, inplace = True) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + if chr is None or chr == '': + max_page = len(df.index) + max_page = math.floor(max_page / 10) + 1 + return generate_table_position(df, 'table-position', 1, mms, max_bulges,guide, job_id ), '1/' + str(max_page) + if pos_end is None: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) )].index , inplace = True) + else: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) | (df['Position'] > int(pos_end)))].index , inplace = True) + max_page = len(df.index) + max_page = math.floor(max_page / 10) + 1 + return generate_table_position(df, 'table-position', 1, mms, max_bulges,guide, job_id ), '1/'+ str(max_page) + else: + + if max(btn_position_section) == nNext: + current_page = current_page + 1 + if chr: + if pos_begin is None or pos_begin == '': + pos_begin = 0 + if pos_end == '': + pos_end = None + if pos_end: + if int(pos_end) < int(pos_begin): + pos_end = None + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' + guide +'.txt', sep = '\t') + else: + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' 
+ guide +'.txt', sep = '\t')#, nrows = current_page * 10) + df.rename(columns = {'#Chromosome':'Chromosome'}, inplace = True) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + if chr: + if pos_end is None: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) )].index , inplace = True) + else: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) | (df['Position'] > int(pos_end)))].index , inplace = True) + if ((current_page - 1) * 10) > len(df): + current_page = current_page -1 + if current_page < 1: + current_page = 1 + max_page = len(df.index) + max_page = math.floor(max_page / 10) + 1 + return generate_table_position(df, 'table-position', current_page, mms, max_bulges,guide, job_id ), str(current_page) + '/' + str(max_page) + else: #Go to previous page + current_page = current_page - 1 + if current_page < 1: + current_page = 1 + + if chr: + if pos_begin is None or pos_begin == '': + pos_begin = 0 + if pos_end == '': + pos_end = None + if pos_end: + if int(pos_end) < int(pos_begin): + pos_end = None + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' + guide +'.txt', sep = '\t') + else: + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' 
+ guide +'.txt', sep = '\t')#, nrows = current_page * 10) + df.rename(columns = {'#Chromosome':'Chromosome'}, inplace = True) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + if chr: + if pos_end is None: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) )].index , inplace = True) + else: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) | (df['Position'] > int(pos_end)))].index , inplace = True) + + max_page = len(df.index) + max_page = math.floor(max_page / 10) + 1 + return generate_table_position(df, 'table-position', current_page, mms, max_bulges,guide, job_id ), str(current_page) + '/' + str(max_page) +''' +# Input('prev-page-position','n_clicks_timestamp'), +#Input('next-page-position', 'n_clicks_timestamp'), + + +# #Save the first scomposition target from the second table, in order to highlight it in the first table +# @app.callback( +# Output('target-to-highlight','children'), +# [Input('table-scomposition-cluster', 'data')], +# [State('target-to-highlight','children')] +# ) +# def saveFirstScomposedTarget(data_scomp, current_target): +# if current_target != '' or current_target is None: +# raise PreventUpdate +# if data_scomp is None or not data_scomp: +# raise PreventUpdate +# return data_scomp[0]['DNA'] + +# #update the Color of the top1 scomposed target in the first table +# @app.callback( +# Output('table-position-target', 'style_data_conditional'), +# [Input('target-to-highlight', 'children')] +# ) +# def highlightSummaryTarget(to_highlight): +# if to_highlight is None or to_highlight == '': +# raise PreventUpdate +# return [{'if': {'filter_query': '{DNA} eq ' + to_highlight}, 'font-weight':'bold'}] + +# Filter/sort cluster +# Filter and sorting sample targets + + +############################ Genome Database Page Callbacks ########################## + + 
+############################ History Page ########################## + +# def get_results(): +# ''' +# Get a dataframe of the Results directory +# ''' +# results_dirs = [join(current_working_directory + '/Results/', f) for f in listdir(current_working_directory + '/Results/') if isdir( +# join(current_working_directory + '/Results/', f)) and isfile(current_working_directory + '/Results/' + f + '/Params.txt')] +# results_dirs.sort(key=os.path.getctime) # Sorted older first +# results_dirs = [os.path.basename(f) for f in results_dirs] +# col = ['Job ID', 'Genome', 'PAM', 'Mismatches', 'DNA Bulges', +# 'RNA Bulges', 'Gecko Comparison', 'Reference Comparison', 'Date', 'Load'] +# a = pd.DataFrame(columns=col) +# for job in results_dirs: +# if os.path.exists(current_working_directory + '/Results/' + job + '/Params.txt'): +# with open(current_working_directory + '/Results/' + job + '/Params.txt') as p: +# all_params = p.read() +# mms = (next(s for s in all_params.split('\n') +# if 'Mismatches' in s)).split('\t')[-1] +# genome_selected = (next(s for s in all_params.split( +# '\n') if 'Genome_selected' in s)).split('\t')[-1] +# genome_selected = genome_selected.replace('_', ' ') +# if os.path.exists(current_working_directory + '/Results/' + job + '/log.txt'): +# with open(current_working_directory + '/Results/' + job + '/log.txt') as lo: +# all_log = lo.read() +# job_start = (next(s for s in all_log.split( +# '\n') if 'Job\tStart' in s)).split('\t')[-1] +# try: +# job_end = (next(s for s in all_log.split( +# '\n') if 'Job\tDone' in s)).split('\t')[-1] +# except: +# link_load = URL + '/load?job=' + job +# else: +# link_load = URL + '/result?job=' + job +# else: +# job_start = 'n/a' +# link_load = URL + '/load?job=' + job +# dna = (next(s for s in all_params.split( +# '\n') if 'DNA' in s)).split('\t')[-1] +# rna = (next(s for s in all_params.split( +# '\n') if 'RNA' in s)).split('\t')[-1] +# pam = (next(s for s in all_params.split( +# '\n') if 'Pam' in s)).split('\t')[-1] +# 
gecko = (next(s for s in all_params.split( +# '\n') if 'Gecko' in s)).split('\t')[-1] +# if gecko == 'True': +# gecko = 'Yes' +# else: +# gecko = 'No' +# comparison = (next(s for s in all_params.split( +# '\n') if 'Ref_comp' in s)).split('\t')[-1] +# if comparison == 'True': +# comparison = 'Yes' +# else: +# comparison = 'No' +# if os.path.exists(current_working_directory + '/Results/' + job + '/guides.txt'): +# with open(current_working_directory + '/Results/' + job + '/guides.txt') as g: +# n_guides = str(len(g.read().strip().split('\n'))) +# else: +# n_guides = 'n/a' + +# a = a.append({'Job ID': job, 'Genome': genome_selected, 'Mismatches': mms, 'DNA Bulges': dna, +# 'RNA Bulges': rna, 'PAM': pam, 'Gecko Comparison': gecko, 'Reference Comparison': comparison, 'Date': job_start, 'Load': link_load, 'Delete': ''}, ignore_index=True) +# a = a.sort_values(['Mismatches', 'DNA Bulges', 'RNA Bulges'], +# ascending=[True, True, True]) +# return a + + +# Callback to update the hidden div filter of history page + + +############################ GUI CALLBACKS ######################### +# # Add a new genome +# @app.callback( +# Output("genome-job", "value"), +# [Input("add-genome", "n_clicks")], +# ) +# def add_genome(nAdd): +# """ +# Bottone per avviare la GUI in Tkinter per l'aggiunta di genomi offline +# """ +# raise PreventUpdate +# from GUI import ChooseFiles as cf +# if nAdd is None: +# raise PreventUpdate +# cf.startChooseFiles(current_working_directory, app_location + 'GUI/') +# return '' + +# Update an existing dictionary + + +# @app.callback( +# Output("dict-job", "value"), +# [Input("update-dict", "n_clicks")], +# ) +# def update_dict(nUpd): +# """ +# Bottone per avviare la GUI in Tkinter per l'aggiornamento di dizionari +# """ +# raise PreventUpdate +# from GUI import UpdateDict as ud +# if nUpd is None: +# raise PreventUpdate +# ud.startUpdateDict(current_working_directory) +# return '' + + +# # Open filedialog for choosing new annotation file +# @app.callback( 
+# [Output('label-new-annotation-selected', 'children'), +# Output('tooltip-label-new-annotation-selected', 'children')], +# [Input('button-choose-new-annotation', 'n_clicks')] +# ) +# def fileDialogUpdateAnnotation(n): +# selected_file = openDialog(n, 'F') +# return 'Selected: ' + os.path.basename(selected_file), 'Full Path: ' + selected_file + + +def start_browser(): + wb.open("http://127.0.0.1:8080/result?job=Q47PXDTBC8") + + # BUG nel filtering se ho, in min mismatch etc, la stringa '-', che non è considerata numero + # NOTE: l'ordinamento su Samples Summary o su Samples è fatto su stringhe, e non su numero di samples (potrebbe essere più utile) + # BUG see https://github.com/plotly/dash/issues/1049; Location component is called twice, meaning that two grep can occure at once. diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..e69de29 diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..7142dc0 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,4 @@ +# Auto detect text files and perform LF normalization +* text=auto + +* text eol=lf diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..93c9d00 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +Genomes/* +genome_library/* +Results/* +dictionaries/* +VCF/* +annotations/* +*.pyc +gencode/* +Cache/* +*nohup* diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..9072ab2 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "editor.formatOnSave": true, + "editor.formatOnType": true, + "editor.formatOnPaste": true +} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..f24c045 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,21 @@ +# Set the base image to anaconda python 3.8 +FROM continuumio/miniconda3 + +# File Author / Maintainer +# MAINTAINER Samuele Cancelleri + +ENV SHELL bash + +#update conda channel with bioconda and conda-forge +RUN conda config --add 
channels defaults +RUN conda config --add channels conda-forge +RUN conda config --add channels bioconda + +#update packages of the docker system +RUN apt-get update && apt-get install gsl-bin libgsl0-dev -y && apt-get install libgomp1 -y && apt-get clean + +#Install crisprme package (change the dafault version of python to 3.8) +RUN conda update -n base -c defaults conda +RUN conda install python=3.8 -y +RUN conda install crisprme -y && conda clean --all -y +RUN conda update crisprme -y diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..456a677 --- /dev/null +++ b/LICENSE @@ -0,0 +1,2 @@ +CRISRPme has a dual license. It is made available for free to academic researchers under the Affero License (https://www.gnu.org/licenses/agpl-3.0.en.html). +If you plan to use the CRISRPme for-profit, you will need to purchase a license. Please contact rosalba.giugno@univr.it and lpinello@mgh.harvard.edu for more information. diff --git a/OldScripts/annotation_samples.py b/OldScripts/annotation_samples.py new file mode 100644 index 0000000..f78ac34 --- /dev/null +++ b/OldScripts/annotation_samples.py @@ -0,0 +1,250 @@ +''' +Script that annotatets the samples, in order to have a fast generate-report +Input file is job_id.top_1.samples, job_id.Annotations.targets., job_id.Annotation.summary.txt, result name +Create a dict for the guides, that contains a dict for the samples, that contains a dict for the annotatio category +Eg +{ + GUIDE1 -> { + SAMPLE1 -> { + EXON -> [0 0 0 0 1 0 0 8 2 0], + INTRONS -> [0 0 0 0 1 0 0 8 2 0], + CTCF -> [0 0 0 0 1 0 0 8 2 0] + }, + SAMPLE2 ->{ + EXON -> [0 0 0 0 1 0 0 8 2 0], + INTRONS -> [0 0 0 0 1 0 0 8 2 0], + CTCF ->[0 0 0 0 1 0 0 8 2 0] + } + }, + GUIDE2 ->{ + SAMPLE ->{ + ANNOTATION -> [annotation count for each mm value] + } + } +} +''' +# argv 1 is top1.samples.txt +# argv 2 is Annotation.targets +# argv 3 is Annotation.summary.txt -> to get the name of annotations #TODO modificare meglio, prenderle direttamente dal file +# 
argv 4 is result name +import sys +import os +import pandas as pd + +#Dict for populations +pop_file = pd.read_excel(os.path.dirname(os.path.realpath(__file__)) + '/20130606_sample_info.xlsx') +all_samples = pop_file.Sample.to_list() +all_pop = pop_file.Population.to_list() +dict_pop = dict() +for pos, i in enumerate(all_samples): + try: + dict_pop[i] = all_pop[pos] #{'S1':'POP1', 'S2':'POP1', ...} + except: + dict_pop[i] = all_pop[pos] + +#Dict for superpopulation +population_1000gp = {'CHB':'EAS', 'JPT':'EAS', 'CHS':'EAS', 'CDX':'EAS', 'KHV':'EAS', + 'CEU':'EUR', 'TSI':'EUR', 'FIN':'EUR', 'GBR':'EUR', 'IBS':'EUR', + 'YRI':'AFR', 'LWK':'AFR', 'GWD':'AFR', 'MSL':'AFR', 'ESN':'AFR', 'ASW':'AFR', 'ACB':'AFR', + 'MXL':'AMR', 'PUR':'AMR', 'CLM':'AMR', 'PEL':'AMR', + 'GIH':'SAS', 'PJL':'SAS', 'BEB':'SAS', 'STU':'SAS', 'ITU':'SAS' +} +superpopulation = ['EAS', 'EUR', 'AFR', 'AMR','SAS'] + +result_name = sys.argv[4] +# samples_dict = { + # GUIDE1 ->{ + # chrXposY -> [[Sample1, sample7], []] + # chrXposY2 -> [[Sample5, sample7], []] + # chrX2posY 3-> [[Sample10, sample11, sample30], []] + # }, + # GUIDE2 -> { + # CHRPOS -> [[Sample list], [List visited annotations]] List visited annotation is empty at first, but can become -> ['exon', 'promoter',...] 
+ # } +# } +test_dict = {'GAGTCCGAGCAGAAGAAGAANNN':{0:0,1:0,2:0,3:0,4:0,5:0,6:0}, 'CCATCGGTGGCCGTTTGCCCNNN':{0:0,1:0,2:0,3:0,4:0,5:0,6:0}} + +samples_dict = dict() +annotation_dict = dict() +with open(sys.argv[1]) as targets: + for line in targets: + if '#' in line: + continue + line = line.strip().split('\t') + if line[-2] == 'n': + test_dict[line[1].replace('-','')][int(line[7])] +=1 + continue + guide = line[1].replace('-','') + if guide not in samples_dict: + samples_dict[guide] = dict() + try: + samples_dict[guide][line[3] + line[4]][0] += line[-2].split(',') + except: + samples_dict[guide][line[3] + line[4]] = [line[-2].split(','), []] +print(test_dict) +# print(samples_dict) +# print(samples_dict['CTAACAGTTGCTTTTATCACNNN']['chr2146560428']) +# print(samples_dict['TGCTTGGTCGGCACTGATAGNNN']['chr2250085897']) + +ann_list = [] #TODO better way to get annotation name + +with open (sys.argv[3], 'r') as ann_file: + next(ann_file) #Skip -Summary_Total line + next(ann_file) #Skip targets 0 0 0 ... 
line + for line in ann_file: + if '-Summary' in line: + break + ann_list.append(line.strip().split('\t')[0]) + + + +summary_targets_guide = dict() #To save targets and annotation count for top 1 +dict_pop_count = dict() #To save targets and annotations count for populations +dict_superpop_count = dict() #To save targets and annotations count for superpopulations + +with open (sys.argv[2]) as targets: #Count annotation for each target + for line in targets: + if '#' in line: + continue + line = line.strip().split('\t') + guide = line[1].replace('-','') + + if guide not in annotation_dict.keys(): + annotation_dict[guide] = dict() + summary_targets_guide[guide] = {'targets':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} + dict_pop_count[guide] = dict() + dict_superpop_count[guide] = dict() + for pop in set(all_pop): + dict_pop_count[guide][pop] = {'targets':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} + dict_superpop_count[guide][population_1000gp[pop]] = {'targets':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} + + # try: + # summary_targets_guide[guide][line[-1]][int(line[7])] += 1 + # except: + # summary_targets_guide[guide][line[-1]] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + # summary_targets_guide[guide][line[-1]][int(line[7])] += 1 + # summary_targets_guide[guide]['targets'][int(line[7])] += 1 + try: + samples_list = samples_dict[guide][line[3] + line[4]] + except: + samples_list = [[], ann_list] + if line[-1] in samples_list[1]: #if target was already counted in that annotation + continue + #Count the annotations for the guide (only top1) + try: + summary_targets_guide[guide][line[-1]][int(line[7])] += 1 + except: + summary_targets_guide[guide][line[-1]] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + summary_targets_guide[guide][line[-1]][int(line[7])] += 1 + summary_targets_guide[guide]['targets'][int(line[7])] += 1 + + samples_dict[guide][line[3] + line[4]][1].append(line[-1]) #Get list of samples in current gudie chr pos + visited_pop = [] + visited_superpop = [] + for sample in samples_list[0] : + if sample not in 
annotation_dict[guide]: + annotation_dict[guide][sample] = {'targets':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} + # print(guide, sample, line[-1], line[6]) + + try: + annotation_dict[guide][sample][line[-1]][int(line[7])] += 1 #increase annotation count + except: + annotation_dict[guide][sample][line[-1]] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + annotation_dict[guide][sample][line[-1]][int(line[7])] += 1 + + if dict_pop[sample] in visited_pop: + continue + else: + visited_pop.append(dict_pop[sample]) + dict_pop_count[guide][dict_pop[sample]]['targets'][int(line[7])] += 1 + try: + dict_pop_count[guide][dict_pop[sample]][line[-1]][int(line[7])] += 1 + except: + dict_pop_count[guide][dict_pop[sample]][line[-1]] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + dict_pop_count[guide][dict_pop[sample]][line[-1]][int(line[7])] += 1 + if population_1000gp[dict_pop[sample]] in visited_superpop: + continue + else: + visited_superpop.append(population_1000gp[dict_pop[sample]]) + dict_superpop_count[guide][population_1000gp[dict_pop[sample]]]['targets'][int(line[7])] += 1 + try: + dict_superpop_count[guide][population_1000gp[dict_pop[sample]]][line[-1]][int(line[7])] += 1 + except: + dict_superpop_count[guide][population_1000gp[dict_pop[sample]]][line[-1]] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + dict_superpop_count[guide][population_1000gp[dict_pop[sample]]][line[-1]][int(line[7])] += 1 + annotation_dict[guide][sample]['targets'][int(line[7])] += 1 + +for guide in annotation_dict: + with open(result_name + '.sample_annotation.' 
+ guide +'.samples.txt', 'w+') as result: + result.write('-Summary_Total\n') + result.write('targets\t' + '\t'.join([str(x) for x in summary_targets_guide[guide]['targets']]) + '\n') + for annotation in ann_list: + try: + result.write(annotation + '\t' + '\t'.join([str(x) for x in summary_targets_guide[guide][annotation]]) + '\n') + except: + result.write(annotation + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + + #Write summary for each sample + for sample in all_samples:#annotation_dict[guide]: + result.write('-Summary_' + sample + '\n') + try: + result.write('targets\t' + '\t'.join([str(x) for x in annotation_dict[guide][sample]['targets']]) + '\n') + except: + result.write('targets' + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + for annotation in ann_list: + try: + result.write(annotation + '\t' + '\t'.join([str(x) for x in annotation_dict[guide][sample][annotation]]) + '\n') + except: + result.write(annotation + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + + +for guide in annotation_dict: + with open(result_name + '.sample_annotation.' 
+ guide + '.population.txt', 'w+') as result: + #Write result guide general + result.write('-Summary_Total\n') + result.write('targets\t' + '\t'.join([str(x) for x in summary_targets_guide[guide]['targets']]) + '\n') + for annotation in ann_list: + try: + result.write(annotation + '\t' + '\t'.join([str(x) for x in summary_targets_guide[guide][annotation]]) + '\n') + except: + result.write(annotation + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + #Write result population + for population in set(all_pop): + result.write('-Summary_' + population + '\n') + try: + result.write('targets\t' + '\t'.join([str(x) for x in dict_pop_count[guide][population]['targets']]) + '\n') + except: + result.write('targets' + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + for annotation in ann_list: + try: + result.write(annotation + '\t' + '\t'.join([str(x) for x in dict_pop_count[guide][population][annotation]]) + '\n') + except: + result.write(annotation + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + + +#For each superpopulation, write sum of population +for guide in annotation_dict: + with open(result_name + '.sample_annotation.' 
+ guide + '.superpopulation.txt', 'w+') as result: + #Write result guide general + result.write('-Summary_Total\n') + result.write('targets\t' + '\t'.join([str(x) for x in summary_targets_guide[guide]['targets']]) + '\n') + for annotation in ann_list: + try: + result.write(annotation + '\t' + '\t'.join([str(x) for x in summary_targets_guide[guide][annotation]]) + '\n') + except: + result.write(annotation + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + #Write result superpopulation + for superpop in superpopulation: + result.write('-Summary_' + superpop + '\n') + try: + result.write('targets\t' + '\t'.join([str(x) for x in dict_superpop_count[guide][superpop]['targets']]) + '\n') + except: + result.write('targets' + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + for annotation in ann_list: + try: + result.write(annotation + '\t' + '\t'.join([str(x) for x in dict_superpop_count[guide][superpop][annotation]]) + '\n') + except: + result.write(annotation + '\t' + '\t'.join(['0' for i in range(10)]) + '\n') + + + \ No newline at end of file diff --git a/OldScripts/annotator_for_onlyvar.old.py b/OldScripts/annotator_for_onlyvar.old.py new file mode 100644 index 0000000..0ca6edf --- /dev/null +++ b/OldScripts/annotator_for_onlyvar.old.py @@ -0,0 +1,946 @@ +#!/usr/bin/env python + +''' +Merge of annotator, calc_samples_faster.py and scores. ONLY FOR VAR SEARCH since no distinction between semicommon etc +Prende in input il file dei top1, ordinati per chr, e estrae i samples corrispondenti. Per ogni target, salva l'insieme dei sample in samples.all.txt, crea le combinazioni tenendo i target reali +in samples.txt, poi calcola l'annotazione corrispondente e crea il file Annotation.targets e i vari summaries. +''' + + +#NOTE serve 20130606_sample_info.xlsx nella stessa cartella di questo script +#argv1 è il file .bed con le annotazioni +#argv2 è il file .cluster.txt, che è ordinato per cromosoma. 
Era (03/03) il file top1 ordinato per chr +#argv3 è nome del file in output +#argv4 è directory dei dizionari +#argv5 is pamfile +#argv 6 is max allowed mms +#argv 7 is genome reference directory (Eg ../../Genomes/hg38_ref) +#argv8 is guide file +# NOTE 06/03 -> removed PAM Disruption calculation +import sys +import json +import time +import itertools +import os +from intervaltree import Interval, IntervalTree +import concurrent.futures +import subprocess +import pandas as pd +import pickle #to read CFD matrices +import numpy as np +import azimuth.model_comparison +import string +import multiprocessing + +SIZE_DOENCH = 10000 +N_THR = 3 + +#Return max doench value among list of extended targets +def doenchParallel(targets, model, result): + doench_score = azimuth.model_comparison.predict(targets,None, None, model= model, pam_audit=False) + doench_score = [np.around(i * 100) for i in doench_score] + max_doench = int(max(doench_score)) + result.append(max_doench) + +def doenchForIupac(sequence_doench, guide_seq): + pos_iupac = [] + var = [] + for pos, c in enumerate(sequence_doench): + if c in iupac_code: + pos_iupac.append(pos) + var.append(iupac_code[c]) + + if var: + for i in itertools.product(*var): + t = list(sequence_doench) + for p, el in enumerate(pos_iupac): + t[el] = i[p] + targets_for_doench[guide_seq].append(''.join(t)) + else: + targets_for_doench[guide_seq].append(sequence_doench) + +def get_mm_pam_scores(): + try: + mm_scores = pickle.load(open(os.path.dirname(os.path.realpath(__file__)) + '/mismatch_score.pkl', 'rb')) + pam_scores = pickle.load(open(os.path.dirname(os.path.realpath(__file__)) +'/PAM_scores.pkl', 'rb')) + return (mm_scores, pam_scores) + except: + raise Exception("Could not find file with mismatch scores or PAM scores") + + +def revcom(s): + basecomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A'} + letters = list(s[::-1]) + letters = [basecomp[base] for base in letters] + return ''.join(letters) + +# Calculates CFD score +def 
calc_cfd(guide_seq, sg, pam, mm_scores, pam_scores): + score = 1 + dna_gp = 0 + sg = sg.replace('T', 'U') + guide_seq = guide_seq.replace('T', 'U') + s_list = list(sg) + guide_seq_list = list(guide_seq) + for i, sl in enumerate(s_list): + if guide_seq_list[i] == sl: + score *= 1 + else: + key = 'r' + guide_seq_list[i] + ':d' + revcom(sl) + ',' + str(i + 1) + score *= mm_scores[key] + if '-' in guide_seq_list[i]: + dna_gp = dna_gp + 1 + score *= pam_scores[pam] + return score + +print('ESECUZIONE DI ANNOTATION E CALC SAMPLE INSIEME') +print('TEST PER ANNOTAZIONE COMPLETA: I TARGET SENZA ANNOTAZIONE SONO SALVATI COME \"n\"') +print('SE UN TARGET HA 1+ ANNOTAZIONI, LE SALVA IN SINGOLA UNICA RIGA') +print('RIMOZIONE DEI TARGET CHE NON HANNO SAMPLES') +print('CALCOLO SCORES') +print("READING INPUT FILES") +#Dictionaries for annotating samples + +#Dict for populations +pop_file = pd.read_excel(os.path.dirname(os.path.realpath(__file__)) + '/20130606_sample_info.xlsx') +all_samples = pop_file.Sample.to_list() +all_pop = pop_file.Population.to_list() +dict_sample_to_pop = dict() +for pos, i in enumerate(all_samples): + try: + dict_sample_to_pop[i] = all_pop[pos] #{'S1':'POP1', 'S2':'POP1', ...} + except: + dict_sample_to_pop[i] = all_pop[pos] + +#Dict for superpopulation +dict_pop_to_sup = {'CHB':'EAS', 'JPT':'EAS', 'CHS':'EAS', 'CDX':'EAS', 'KHV':'EAS', + 'CEU':'EUR', 'TSI':'EUR', 'FIN':'EUR', 'GBR':'EUR', 'IBS':'EUR', + 'YRI':'AFR', 'LWK':'AFR', 'GWD':'AFR', 'MSL':'AFR', 'ESN':'AFR', 'ASW':'AFR', 'ACB':'AFR', + 'MXL':'AMR', 'PUR':'AMR', 'CLM':'AMR', 'PEL':'AMR', + 'GIH':'SAS', 'PJL':'SAS', 'BEB':'SAS', 'STU':'SAS', 'ITU':'SAS' +} +superpopulation = ['EAS', 'EUR', 'AFR', 'AMR','SAS'] + + +#READ INPUT FILES +annotationFile = sys.argv[1] #file with annotation +resultsFile = sys.argv[2] #file with results from search +outputFile = sys.argv[3] #file with annotated results + +#Get pam and guide length for new count mismatch samples +pam_at_beginning = False +with open 
(sys.argv[5]) as pam:
    # PAM file format: "<full sequence including PAM> <signed PAM length>".
    # A negative length means the PAM precedes the guide.
    line = pam.read().strip()
    pam = line.split(' ')[0]
    len_pam = int(line.split(' ')[1])
    guide_len = len(pam) - len_pam
    pos_beg = 0
    pos_end = None
    pam_begin = 0
    pam_end = len_pam * (-1)
    if len_pam < 0:
        # PAM at the beginning: strip it from the front of the sequence
        guide_len = len(pam) + len_pam
        pam = pam[: (len_pam * (-1))]
        len_pam = len_pam * (-1)
        pos_beg = len_pam
        pos_end = None
        pam_begin = 0
        pam_end = len_pam
        pam_at_beginning = True
    else:
        # PAM at the end: keep only the PAM characters
        pam = pam[(len_pam * (-1)):]
        pos_beg = 0
        pos_end = len_pam * (-1)
        pam_begin = len_pam * (-1)
        pam_end = None

# CFD/Doench scoring is defined only for 20nt guides with an NGG PAM;
# otherwise write a placeholder acfd.txt and skip scoring.
do_scores = True
if guide_len != 20 or 'NGG' != pam:
    with open('acfd.txt', 'w+') as result:
        result.write('NO SCORES')
    do_scores = False

# IUPAC ambiguity code -> set of concrete nucleotides (upper and lower case)
iupac_code_set = {
    "R":{"A", "G"},
    "Y":{"C", "T"},
    "S":{"G", "C"},
    "W":{"A", "T"},
    "K":{"G", "T"},
    "M":{"A", "C"},
    "B":{"C", "G", "T"},
    "D":{"A", "G", "T"},
    "H":{"A", "C", "T"},
    "V":{"A", "C", "G"},
    "r":{"A", "G"},
    "y":{"C", "T"},
    "s":{"G", "C"},
    "w":{"A", "T"},
    "k":{"G", "T"},
    "m":{"A", "C"},
    "b":{"C", "G", "T"},
    "d":{"A", "G", "T"},
    "h":{"A", "C", "T"},
    "v":{"A", "C", "G"},
    "A":{"A"},
    "T":{"T"},
    "C":{"C"},
    "G":{"G"},
    "a":{"a"},
    "t":{"t"},
    "c":{"c"},
    "g":{"g"},
    'N':{'A','T','G','C'}
    }


# OPEN INPUT FILES AND PREPARE OUTPUT FILE
inResult = open(resultsFile, "r") # resultfile open
inAnnotationFile = open(annotationFile, "r") # file with annotations open
# outFileSampleAll = open(outputFile + '.samples.all.annotation.txt', 'w') # outfile open (file with IUPAC targets and associated samples and annotation)
outFileSample = open(outputFile + '.samples.annotation.txt', 'w') # file with real nucleotides with associated samples and annotation
outFileSummary = open(outputFile + '.Annotation.summary.txt', 'w') # outfile open (summary file calculated on top1file)

# Count input lines with wc so progress percentages can be reported later.
process = subprocess.Popen(['wc', '-l', resultsFile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
total_line =
int(out.decode('UTF-8').split(' ')[0])
if total_line < 2:
    # Header only (or empty file): nothing to annotate.
    print('WARNING! Input file has no targets')
    sys.exit()
if total_line < 10:
    mod_tot_line = 1
else:
    # Emit a progress message roughly every 10% of the input.
    mod_tot_line = int(total_line/10)
#VARIABLE INIT
guideDict = {}
totalDict = {}

start_time = time.time()

print("EXECUTING PRELIMINARY OPERATIONS")

# Interval tree over annotation regions: [start, end) -> "chr\tannotation name".
annotationsTree = IntervalTree()
annotationsSet = set()
#guidesSet = set() #NOTE/BUG if guide finds 0 targets, it will not be annotated

for line in inAnnotationFile:
    x = line.split('\t')
    x[3] = str(x[3]).rstrip("\n")
    annotationsTree[int(x[1]):int(x[2])] = str(x[0])+'\t'+str(x[3])
    annotationsSet.add(str(x[3]))

# One counter bucket per total value 0..9 (mismatches + bulges).
totalDict['targets'] = [0]*10
for item in annotationsSet:
    totalDict[item] = [0]*10

print("PRELIMINARY OPERATIONS COMPLETED IN: %s seconds" % (time.time() - start_time))

start_time = time.time()

print("EXECUTING ANNOTATION")

# Pick the output header based on the column count of the search results.
with open(resultsFile, 'r') as resFile:
    header_len = len(resFile.readline().strip().split('\t'))

if header_len == 14: #'Both' case : comparison variant/ref is active
    header = '#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tCluster Position\tDirection\tMismatches\tBulge_Size\tTotal\tMin_mismatches\tMax_mismatches\tPAM_gen\tVar_uniq\tSamples\tReal Guide\tAnnotation Type'
else: #'Var' case: PAM creation and Variant_unique not calculated
    header = '#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tCluster Position\tDirection\tMismatches\tBulge_Size\tTotal\tMin_mismatches\tMax_mismatches\tSamples\tReal Guide\tAnnotation Type'

mm_pos = 7 #position of mismatch column
bulge_pos = 8  # position of bulge-size column
outFileSample.write(header + '\n')
# outFileSampleAll.write(header + '\n')
summary_samples = True

header_list = header.strip().split('\t')
#Variables for summary samples code
'''
{
    GUIDE1 -> {
        SAMPLE/POP/SUPERPOP1 ->{
            targets -> [0 0 0 0 0 0 0 0 0],
            ann1 -> [0 0 0 0 0 0
 0 0 0],
            ann2 -> [0 0 0 0 0 0 0 0 0],
        },
        SAMPLE/POP/SUPERPOP2 ->{
            targets -> [0 0 0 0 0 0 0 0 0],
            ann1 -> [0 0 0 0 0 0 0 0 0],
            ann2 -> [0 0 0 0 0 0 0 0 0],
        }
    }
    GUIDE2 -> {
        SAMPLE/POP/SUPERPOP1 ->{
            targets -> [0 0 0 0 0 0 0 0 0],
            ann1 -> [0 0 0 0 0 0 0 0 0],
            ann2 -> [0 0 0 0 0 0 0 0 0],
        },
        SAMPLE/POP/SUPERPOP2 ->{
            targets -> [0 0 0 0 0 0 0 0 0],
            ann1 -> [0 0 0 0 0 0 0 0 0],
            ann2 -> [0 0 0 0 0 0 0 0 0],
        }
    }
}

Per pop e superpop, se ho due sample stessa famiglia stesso target, conto solo una volta (visited_pop and visited_superpop array)
'''
# Per-guide counters keyed by sample / population / superpopulation,
# each holding the {targets/annotation -> [0]*10} matrix sketched above.
count_sample = dict()
count_pop = dict()
count_superpop = dict()

#Create -Summary_total for a file ref.Annotation.summary.txt from the y and n values of Var_uniq column
summary_barplot_from_total = False
if 'Var_uniq' in header:
    vu_pos = header_list.index('Var_uniq')
count_unique = dict()
count_unique['targets'] = [0]*10
count_unique_for_guide = dict()
for item in annotationsSet:
    count_unique[item] = [0]*10

#Variables for samples calculation
total_error = 0


current_chr = 'none'
chr_name = 'none'

def rev_comp(a):
    # Complement of a single nucleotide, case-insensitive; any character that
    # is not A/T/C (i.e. G, but also anything unexpected) maps to 'C'.
    if a == 'A' or a == 'a':
        return 'T'
    if a == 'T' or a == 't':
        return 'A'
    if a == 'C' or a == 'c':
        return 'G'
    return 'C'

# IUPAC ambiguity code -> tuple of concrete nucleotides, used to enumerate
# all real sequences hidden behind an ambiguous (enriched) target.
iupac_code = {
    "R":("A", "G"),
    "Y":("C", "T"),
    "S":("G", "C"),
    "W":("A", "T"),
    "K":("G", "T"),
    "M":("A", "C"),
    "B":("C", "G", "T"),
    "D":("A", "G", "T"),
    "H":("A", "C", "T"),
    "V":("A", "C", "G"),
    "r":("A", "G"),
    "y":("C", "T"),
    "s":("G", "C"),
    "w":("A", "T"),
    "k":("G", "T"),
    "m":("A", "C"),
    "b":("C", "G", "T"),
    "d":("A", "G", "T"),
    "h":("A", "C", "T"),
    "v":("A", "C", "G"),
    'N':('A', 'T', 'C', 'G')
    }

#For scoring of CFD And Doench
tab = str.maketrans("ACTGRYSWMKHDBVactgryswmkhdbv", "TGACYRSWKMDHVBtgacyrswkmdhvb")

def reverse_complement_table(seq):
    # Reverse complement that also maps IUPAC ambiguity codes via `tab`.
    return seq.translate(tab)[::-1]

mm_scores, pam_scores = get_mm_pam_scores()
guides_dict = dict()  # guide -> cumulative CFD score
guides_dict_doench = dict()  # guide -> max Doench score
targets_for_doench = dict()  # guide -> concrete sequences to score with Doench

N_THR = multiprocessing.cpu_count() // 2
refgenomedir = sys.argv[7]

with open(
os.path.dirname(os.path.realpath(__file__)) + "/azimuth/saved_models/V3_model_nopos.pickle", 'rb') as f: + model = pickle.load(f) +max_doench = 0 +sum_cfd = 0 +cfd_scores = [] + + +start_time_total = time.time() +lines_processed = 0 +allowed_mms = int(sys.argv[6]) +current_guide_chr_pos = 'no' +cluster_update = open(outputFile + '.cluster.tmp.txt', 'w+') +cluster_update.write(header + '\n') #Write header +save_cluster_targets = True +remove_iupac = False +save_total_general_table = False +add_to_general_table = dict() #target semicommon da aggiungere alla tab generale delle guide, metto i valori di total presi dal primo target REF nel cluster di un semicommon che esiste +last_annotation = '' #needed when counting the ref part of a semicommon in order to not redo the annotation + +next(inResult) #Skip header +for line in inResult: + x = line.strip().split('\t') + guide_no_bulge = x[1].replace("-","") + if (guide_no_bulge + x[3] + x[5]) == current_guide_chr_pos: #Target is in current cluster, simply save the sample and annotation, discard if status is F + if save_cluster_targets: + if remove_iupac: + for c in x[2]: + if c in iupac_code: + break + else: #no break triggered + cluster_update.write(line.strip() + '\t.\t' + guide_no_bulge + '\t.\n') + continue + #Keep the semicommon ref total value to be added to general table + if save_total_general_table: + for c in x[2]: + if c in iupac_code: + break + else: #Found first REF target in cluster of semicommon + add_to_general_table[guide_no_bulge][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + #Do annotation to keep numbers consistent between images and general table + #conto i target generali per mm threshold + totalDict['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + guideDict[guide_no_bulge]['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + + #Conto per annotazione + for ann in last_annotation.split(','): + if ann == 'n': + break + guideDict[guide_no_bulge][ann][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + 
totalDict[ann][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + + #Calculate scores + if do_scores and x[0] == 'X': #Calculate scores for reference targets + cfd_score = calc_cfd(x[1], x[2].upper()[:-3], x[2].upper()[-2:], mm_scores, pam_scores) + sum_cfd = sum_cfd + cfd_score + try: + guides_dict[x[1]] = guides_dict[x[1]] + cfd_score + except: + guides_dict[x[1]] = cfd_score + + if x[mm_pos] == '0': #DOENCH + #estraggo sequenza + with open('bedfile_tmp.bed', 'w+') as bedfile: + if x[6] == '+': + bedfile.write(x[3] + '\t' + str(int(x[4]) - 4 ) + '\t' + str(int(x[4]) + 23 + 3 )) + else: + bedfile.write(x[3] + '\t' + str(int(x[4]) - 3 ) + '\t' + str(int(x[4]) + 23 + 4 )) + #Extract sequence from REFERENCE + extr = subprocess.Popen(['bedtools getfasta -fi ' + refgenomedir + '/' + x[3] +'.enriched.fa' ' -bed bedfile_tmp.bed'], shell = True, stdout=subprocess.PIPE) #TODO insert option for .fasta + extr.wait() + out, err = extr.communicate() + out = out.decode('UTF-8') + if x[6] == '+': + sequence_doench = out.strip().split('\n')[-1].upper() + # sequence_doench = sequence_doench[:4] + x[2] + sequence_doench[-3:] + else: + sequence_doench = reverse_complement_table(out.strip().split('\n')[-1].upper()) + # sequence_doench = sequence_doench[:4] + x[2] + sequence_doench[-3:] + + if x[1] not in targets_for_doench: + targets_for_doench[x[1]] = [] + doenchForIupac(sequence_doench, x[1]) #Get all possible targets with iupac itertools for doench + save_total_general_table = False + + cluster_update.write(line.strip() + '\t.\t' + guide_no_bulge + '\t.\n') #add Sample (.) GuideNoBulge and Annotation(.). Use (.) 
to save space + lines_processed +=1 + if lines_processed % (mod_tot_line) == 0: + print('Annotation: Total progress ' + str(round(lines_processed /total_line *100, 2)) + '%') + continue + save_cluster_targets = True + remove_iupac = False + current_guide_chr_pos = guide_no_bulge + x[3] + x[5] + if x[3] != current_chr: + if not os.path.exists(os.path.realpath(sys.argv[4]) + '/my_dict_' + x[3] + '.json'): + pass + else: + print('Done ', current_chr) + current_chr = x[3] + chr_name = x[3] + with open(os.path.realpath(sys.argv[4]) + '/my_dict_' + current_chr + '.json', 'r') as f: + start_time = time.time() + datastore = json.load(f) + print ('Load ' + current_chr + ' done', time.time() - start_time) + + pos_snp = [] + tuple_var_ref = [] + target_combination = [] + pos_snp_chr = [] + set_list = [] + target_string = x[2] + if x[6] == '-': + target_string = target_string[::-1] + bulge_found = 0 + for pos, char in enumerate(target_string): + if char == '-': + bulge_found = bulge_found + 1 + if char in iupac_code: + iupac_pos = str(int(x[4]) + pos + 1 - bulge_found) + try: + a = (datastore[chr_name + ',' + iupac_pos]) #NOTE se non ha samples, ritorna ;ref,var + + ref_char = a.split(';')[-1].split(',')[0] + var_char = a.split(';')[-1].split(',')[1] + + if x[6] == '-': + ref_char = rev_comp(ref_char) + var_char = rev_comp(var_char) + + a = a.split(';')[0] + pos_snp.append(pos) + pos_snp_chr.append(iupac_pos) + tuple_var_ref.append((var_char, ref_char)) + except Exception as e: #NOTE this error can occure if i have an IUPAC in a target that has no vcf file + print(e) + print('Error at ' + line.rstrip() + ', with char ' + char + ', at pos ', iupac_pos, '. 
No corresponding SNP position was found in the vcf file') + a = [] + total_error = total_error + 1 + if a: + set_list.append(set(a.split(','))) + else: + set_list.append(set()) + #Get Union of all samples + union_sample = list(set().union(*set_list)) + if union_sample: + x.append(','.join(union_sample)) + else: + x.append('n') + x.append(guide_no_bulge) + #Get all combinations to remove targets that have no haplotype + #Create all combinations + for i in itertools.product(*tuple_var_ref): + t = list(target_string) + for p, el in enumerate(pos_snp): + t[el] = i[p] + target_combination.append(''.join(t)) + + target_scomposti_salvare = [] + samples_already_assigned = set() + false_targets = 0 + for t in target_combination: + set_list2 = [] + final_result = x.copy() + for ele_pos,p in enumerate(pos_snp_chr): + a = (datastore[chr_name + ',' + p]) + samples = a.split(';')[0] #a[:-4] + + ref = a.split(';')[-1].split(',')[0] + var = a.split(';')[-1].split(',')[1] + if x[6] == '-': + ref = rev_comp(ref) + var = rev_comp(var) + + if t[pos_snp[ele_pos]].upper() == var: + if samples: + set_list2.append(set(samples.split(','))) + else: + set_list2.append(set()) + + if set_list2: + common_samples = set.intersection(*set_list2) + common_samples = common_samples - samples_already_assigned + samples_already_assigned = samples_already_assigned.union(common_samples) + if common_samples: + final_result[-2] = ','.join(common_samples) + else: + # final_result.append('No common samples') + final_result = [] #DO not save results without samples + false_targets += 1 + else: + # final_result.append('No samples') #DO not save results without samples + final_result = [] + if set_list: #Increase false_targets on targets that have at least 1 IUPAC + false_targets += 1 + if x[6] == '-': + t = t[::-1] + mm_new_t = 0 + + if final_result: + guide_no_pam = final_result[1][pos_beg:pos_end] + for position_t, char_t in enumerate(t[pos_beg:pos_end]): + if char_t.upper() != guide_no_pam[position_t]: + 
mm_new_t += 1 + final_result[2] = t + + #Check for pam status + pam_ok = True + for pam_chr_pos, pam_chr in enumerate(t[pam_begin:pam_end]): + if pam_chr.upper() not in iupac_code_set[pam[pam_chr_pos]]: + pam_ok = False + + if not pam_ok or allowed_mms < (mm_new_t - int(final_result[8])): + false_targets += 1 + x[-2] = ','.join(set(x[-2].split(',')) - set(common_samples)) + continue #Remove target since id does not respect PAM or mms constrains + + final_result[7] = str(mm_new_t - int(final_result[8])) + final_result[9] = str(mm_new_t) #total differences between targets and guide (mismatches + bulges) + target_scomposti_salvare.append(final_result) + if false_targets >= len(target_combination): #If all the scomposed targets have no sample or do not respect PAM/mm threasold, the iupac target does not really exist + line = line.strip().split('\t') + #Do not do annotation because target does not exists, and do not save his cluster + save_cluster_targets = False + continue #DO NOT save this target because no ref homologous and no sample combination exists + #target does not exists in enriched, but exists in reference (semi_common), so keep only the reference targets (from his cluster) + # if line[6] == '-': + # reference_semicommon = list(x[2][::-1]) + # else: + # reference_semicommon = list(x[2]) + # for tuple_var_ref_pos, tuple_var_ref_chars in enumerate(tuple_var_ref): + # reference_semicommon[pos_snp[tuple_var_ref_pos]] = tuple_var_ref_chars[1] + # new_ref_target = ''.join(reference_semicommon) + # if line[6] == '-': + # new_ref_target = new_ref_target[::-1] + # line[2] = new_ref_target #TODO fixare perchè il carattere ref potrebbe non essere corretto (non esiste il ref che ha lo stesso gap che nel top1) + # x[2] = new_ref_target + # guide_no_pam = line[1][pos_beg:pos_end] + # for position_t, char_t in enumerate(new_ref_target[pos_beg:pos_end]): + # if char_t.upper() != guide_no_pam[position_t]: #TODO mettere lettere minuscole + # mm_new_t += 1 + x[-2] = 'n' #Since 
iupac target has no scomposition, it means it has no sample associated + # x[7] = str(mm_new_t - int(x[8])) + # x[9] = str(mm_new_t) #total differences between targets and guide (mismatches + bulges) + # line[7] = str(mm_new_t - int(line[8])) + # line[9] = str(mm_new_t) #total differences between targets and guide (mismatches + bulges) + line = '\t'.join(line) + save_cluster_targets = True #TODO salvare il cluster ma togliendo gli iupac + remove_iupac = True + x = next(inResult).strip().split('\t') #get next target of the cluster + while (x[1].replace('-','') + x[3] + x[5]) == current_guide_chr_pos: #while still in same cluster + for c in x[2]: + if c in iupac_code: + break + else: #no break triggered in previous for --> x[2] has no iupac char + break + x = next(inResult).strip().split('\t') + line = '\t'.join(x) #Fist target in the cluster that is REF + x.append('n') #Fist target in the cluster that is REF + x.append(x[1].replace('-','')) #Fist target in the cluster that is REF + tuple_var_ref = [] #Since this is now a REF target, it has no iupac --> needed to save to sample.annotation file + if target_scomposti_salvare: #Keep the target with lowest total and mms as representative of the IUPAC target + target_scomposti_salvare.sort(key = lambda x : (int(x[mm_pos + 2]), int(x[mm_pos]))) #Order scomposition by total and mms values + x[2] = target_scomposti_salvare[0][2] #Adjust Target sequence, from IUPAC to first of scomposition + x[mm_pos] = target_scomposti_salvare[0][mm_pos] + x[mm_pos + 2] = target_scomposti_salvare[0][mm_pos + 2] #Adjust IUPAC with min total and mms of his scomposition + + #Annotate target + visited_pop = [] + visited_superpop = [] + + #inserisco la key nel dict se non presente e creo la sua matrice + if(guide_no_bulge not in guideDict.keys()): + guideDict[guide_no_bulge] = {} + guideDict[guide_no_bulge]['targets'] = {} + guideDict[guide_no_bulge]['targets'] = [0]*10 + + add_to_general_table[guide_no_bulge] = [0] * 10 # GUIDE -> [ 0 0 0 0 0 
...] valori per total (mms + bulge) + + count_unique_for_guide[guide_no_bulge] = dict() #NOTE count_unique means that the target have at least 1 sample + count_unique_for_guide[guide_no_bulge]['targets'] = [0]*10 + + count_sample[guide_no_bulge] = dict() + count_pop[guide_no_bulge] = dict() + count_superpop[guide_no_bulge] = dict() + + for item in annotationsSet: + guideDict[guide_no_bulge][item]= {} + guideDict[guide_no_bulge][item] = [0]*10 + + count_unique_for_guide[guide_no_bulge][item] = [0]*10 + + #conto i target generali per mm threshold + totalDict['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + guideDict[guide_no_bulge]['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + + if summary_barplot_from_total: + if x[-2] != 'n': + count_unique['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + count_unique_for_guide[guide_no_bulge]['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + + if summary_samples: + for sample in x[-2].split(','): + if sample == 'n': + continue + #Initialization if sample, pop or superpop not in dict + if sample not in count_sample[guide_no_bulge]: + count_sample[guide_no_bulge][sample] = {'targets': [0]*10} + for item in annotationsSet: + count_sample[guide_no_bulge][sample][item] = [0]*10 + if dict_sample_to_pop[sample] not in count_pop[guide_no_bulge]: + count_pop[guide_no_bulge][dict_sample_to_pop[sample]] = {'targets': [0]*10} + for item in annotationsSet: + count_pop[guide_no_bulge][dict_sample_to_pop[sample]][item] = [0]*10 + if dict_pop_to_sup[dict_sample_to_pop[sample]] not in count_superpop[guide_no_bulge]: + count_superpop[guide_no_bulge][dict_pop_to_sup[dict_sample_to_pop[sample]]] = {'targets': [0]*10} + for item in annotationsSet: + count_superpop[guide_no_bulge][dict_pop_to_sup[dict_sample_to_pop[sample]]][item] = [0]*10 + #Add +1 to targets + count_sample[guide_no_bulge][sample]['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + if dict_sample_to_pop[sample] not in visited_pop: + 
visited_pop.append(dict_sample_to_pop[sample]) + count_pop[guide_no_bulge][dict_sample_to_pop[sample]]['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + if dict_pop_to_sup[dict_sample_to_pop[sample]] not in visited_superpop: + visited_superpop.append(dict_pop_to_sup[dict_sample_to_pop[sample]]) + count_superpop[guide_no_bulge][dict_pop_to_sup[dict_sample_to_pop[sample]]]['targets'][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + visited_pop = [] + visited_superpop = [] + + #faccio match su albero + foundAnnotations = sorted(annotationsTree[int(x[4]):(int(x[4])+int(len(guide_no_bulge))+1)]) + string_annotation = [] + found_bool = False + for found in range(0, len(foundAnnotations)): + guide = foundAnnotations[found].data + guideSplit = guide.split('\t') + # print(guide, str(guideSplit[0]), str(x[3])) + if(str(guideSplit[0]) == str(x[3])): + found_bool = True + #outFileTargets.write(line.rstrip() + '\t' + str(guideSplit[1]) + "\n") + string_annotation.append(str(guideSplit[1])) + guideDict[guide_no_bulge][guideSplit[1]][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + totalDict[guideSplit[1]][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + + if summary_barplot_from_total: + if x[-2] != 'n': + count_unique[guideSplit[1]][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + count_unique_for_guide[guide_no_bulge][guideSplit[1]][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + + if summary_samples: + for sample in x[-2].split(','): + if sample == 'n': + continue + #Add +1 to annotation + count_sample[guide_no_bulge][sample][guideSplit[1]][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + if dict_sample_to_pop[sample] not in visited_pop: + visited_pop.append(dict_sample_to_pop[sample]) + count_pop[guide_no_bulge][dict_sample_to_pop[sample]][guideSplit[1]][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + if dict_pop_to_sup[dict_sample_to_pop[sample]] not in visited_superpop: + visited_superpop.append(dict_pop_to_sup[dict_sample_to_pop[sample]]) + 
count_superpop[guide_no_bulge][dict_pop_to_sup[dict_sample_to_pop[sample]]][guideSplit[1]][int(x[mm_pos]) + int(x[bulge_pos])] += 1 + visited_pop = [] + visited_superpop = [] + if not found_bool: + x.append('n') + #outFileTargets.write(line.rstrip() + '\tn\n') + else: + x.append(','.join(string_annotation)) + #outFileTargets.write(line.rstrip() + '\t' + ','.join(string_annotation) + '\n') + last_annotation = x[-1] + #Save union samples + annotation + # outFileSampleAll.write(line.rstrip() + '\t' + '\t'.join(x[-3:]) + '\n') + + #Save cluster + # cluster_update.write(line.rstrip() + '\t' + '\t'.join(x[-3:]) + '\n') + if target_scomposti_salvare: + cluster_update.write('\t'.join(x[:-3]) + '\t' + target_scomposti_salvare[0][-2] + '\t' + '\t'.join(x[-2:]) + '\n') ##This line does not contain IUPAC, needed for summary by position; Adjust sample list for target scomposed + else: + cluster_update.write('\t'.join(x) + '\n') #This line does not contain IUPAC, needed for summary by position + cluster_update.write(line.rstrip() + '\t' + '\t'.join(x[-3:]) + '\n') #Write line with iupac (if present) + #Save scomposed targets + if do_scores: + for t in target_scomposti_salvare: + outFileSample.write('\t'.join(t) + '\t' + x[-1] + '\n') + + #Calc scores for scomposed targets + if t[0] == 'X': + cfd_score = calc_cfd(t[1], t[2].upper()[:-3], t[2].upper()[-2:], mm_scores, pam_scores) + sum_cfd = sum_cfd + cfd_score + try: + guides_dict[t[1]] = guides_dict[t[1]] + cfd_score + except: + guides_dict[t[1]] = cfd_score + + if t[mm_pos] == '0': #DOENCH + #estraggo sequenza + with open('bedfile_tmp.bed', 'w+') as bedfile: + if t[6] == '+': + bedfile.write(t[3] + '\t' + str(int(t[4]) - 4 ) + '\t' + str(int(t[4]) + 23 + 3 )) + else: + bedfile.write(t[3] + '\t' + str(int(t[4]) - 3 ) + '\t' + str(int(t[4]) + 23 + 4 )) + + extr = subprocess.Popen(['bedtools getfasta -fi ' + refgenomedir + '/' + t[3] +'.enriched.fa' ' -bed bedfile_tmp.bed'], shell = True, stdout=subprocess.PIPE) #TODO insert 
option for .fasta + extr.wait() + out, err = extr.communicate() + out = out.decode('UTF-8') + if t[6] == '+': + sequence_doench = out.strip().split('\n')[-1].upper() + # sequence_doench = sequence_doench[:4] + t[2] + sequence_doench[-3:] #Uncomment to use sequence specific for sample + else: + sequence_doench = reverse_complement_table(out.strip().split('\n')[-1].upper()) + # sequence_doench = sequence_doench[:4] + t[2] + sequence_doench[-3:] #Uncomment to use sequence specific for sample + + if t[1] not in targets_for_doench: + targets_for_doench[t[1]] = [] + doenchForIupac(sequence_doench, t[1]) #Get all possible targets with iupac itertools for doench + + if not tuple_var_ref and x[0] == 'X': #Calculate scores for reference targets + cfd_score = calc_cfd(x[1], x[2].upper()[:-3], x[2].upper()[-2:], mm_scores, pam_scores) + sum_cfd = sum_cfd + cfd_score + try: + guides_dict[x[1]] = guides_dict[x[1]] + cfd_score + except: + guides_dict[x[1]] = cfd_score + + if x[mm_pos] == '0': #DOENCH + #estraggo sequenza + with open('bedfile_tmp.bed', 'w+') as bedfile: + if x[6] == '+': + bedfile.write(x[3] + '\t' + str(int(x[4]) - 4 ) + '\t' + str(int(x[4]) + 23 + 3 )) + else: + bedfile.write(x[3] + '\t' + str(int(x[4]) - 3 ) + '\t' + str(int(x[4]) + 23 + 4 )) + #Extract sequence from REFERENCE + extr = subprocess.Popen(['bedtools getfasta -fi ' + refgenomedir + '/' + x[3] +'.enriched.fa' ' -bed bedfile_tmp.bed'], shell = True, stdout=subprocess.PIPE) #TODO insert option for .fasta + extr.wait() + out, err = extr.communicate() + out = out.decode('UTF-8') + if x[6] == '+': + sequence_doench = out.strip().split('\n')[-1].upper() + # sequence_doench = sequence_doench[:4] + x[2] + sequence_doench[-3:] + else: + sequence_doench = reverse_complement_table(out.strip().split('\n')[-1].upper()) + # sequence_doench = sequence_doench[:4] + x[2] + sequence_doench[-3:] + + if x[1] not in targets_for_doench: + targets_for_doench[x[1]] = [] + doenchForIupac(sequence_doench, x[1]) #Get all 
possible targets with iupac itertools for doench + + else: + for t in target_scomposti_salvare: + outFileSample.write('\t'.join(t) + '\t' + x[-1] + '\n') + + if not tuple_var_ref: + outFileSample.write(line.rstrip() + '\t' + '\t'.join(x[-3:]) + '\n') #Save REF target in samples.annotation, needed for sum by guide + lines_processed +=1 + if lines_processed % (mod_tot_line) == 0: + print('Annotation: Total progress ' + str(round(lines_processed /total_line *100, 2)) + '%') + +############ SAVE SUMMARIES ############ + + +#scorro tutto il dict total e scrivo il summary, targets e ogni annotation +outFileSummary.write("-Summary_Total\n") +outFileSummary.write('targets' + '\t'+'\t'.join(str(i) for i in totalDict['targets'])+'\n') +for elem in sorted(totalDict.keys(), key = lambda s : s.lower()): + if elem == 'targets': + continue + outFileSummary.write(str(elem)+'\t'+'\t'.join(str(i) for i in totalDict[elem])+'\n') + + +for elem in guideDict.keys(): + outFileSummary.write("-Summary_"+str(elem)+'\n') + outFileSummary.write('targets'+'\t'+'\t'.join(str(i) for i in guideDict[elem]['targets'])+'\n') + for item in sorted(annotationsSet, key = lambda s : s.lower()): + outFileSummary.write(str(item)+'\t'+'\t'.join(str(i) for i in guideDict[elem][item])+'\n') + +#Write summaries for samples, pop, superpop +if summary_samples: + for guide in guideDict: + #Save sample summary + with open(outputFile + '.sample_annotation.' 
+ guide +'.samples.txt', 'w+') as result: + result.write('-Summary_Total\n') + result.write('targets'+'\t'+'\t'.join(str(i) for i in guideDict[guide]['targets'])+'\n') + for item in sorted(annotationsSet, key = lambda s : s.lower()): + result.write(str(item)+'\t'+'\t'.join(str(i) for i in guideDict[guide][item])+'\n') + #Write sample specific counting, put [0]*10 if sample was not found + for sample in all_samples: + result.write('-Summary_' + sample + '\n') + try: + result.write('targets' + '\t' + '\t'.join(str(i) for i in count_sample[guide][sample]['targets']) + '\n') + except: #Sample not found in targets + result.write('targets' + '\t' + '\t'.join(str(i) for i in [0]*10) + '\n') + for item in sorted(annotationsSet, key = lambda s : s.lower()): + try: + result.write(item + '\t' + '\t'.join(str(i) for i in count_sample[guide][sample][item]) + '\n') + except: + result.write(item + '\t' + '\t'.join(str(i) for i in [0]*10) + '\n') + + #Save population summary + with open(outputFile + '.sample_annotation.' 
+ guide +'.population.txt', 'w+') as result: + result.write('-Summary_Total\n') + result.write('targets'+'\t'+'\t'.join(str(i) for i in guideDict[guide]['targets'])+'\n') + for item in sorted(annotationsSet, key = lambda s : s.lower()): + result.write(str(item)+'\t'+'\t'.join(str(i) for i in guideDict[guide][item])+'\n') + #Write population specific counting, put [0]*10 if sample was not found + for population in set(all_pop): + result.write('-Summary_' + population + '\n') + try: + result.write('targets' + '\t' + '\t'.join(str(i) for i in count_pop[guide][population]['targets']) + '\n') + except: #Sample not found in targets + result.write('targets' + '\t' + '\t'.join(str(i) for i in [0]*10) + '\n') + for item in sorted(annotationsSet, key = lambda s : s.lower()): + try: + result.write(item + '\t' + '\t'.join(str(i) for i in count_pop[guide][population][item]) + '\n') + except: + result.write(item + '\t' + '\t'.join(str(i) for i in [0]*10) + '\n') + + #Save superpopulation summary + with open(outputFile + '.sample_annotation.' 
+ guide +'.superpopulation.txt', 'w+') as result: + result.write('-Summary_Total\n') + result.write('targets'+'\t'+'\t'.join(str(i) for i in guideDict[guide]['targets'])+'\n') + for item in sorted(annotationsSet, key = lambda s : s.lower()): + result.write(str(item)+'\t'+'\t'.join(str(i) for i in guideDict[guide][item])+'\n') + #Write superpopulation specific counting, put [0]*10 if sample was not found + for superpop in superpopulation: + result.write('-Summary_' + superpop + '\n') + try: + result.write('targets' + '\t' + '\t'.join(str(i) for i in count_superpop[guide][superpop]['targets']) + '\n') + except: #Sample not found in targets + result.write('targets' + '\t' + '\t'.join(str(i) for i in [0]*10) + '\n') + for item in sorted(annotationsSet, key = lambda s : s.lower()): + try: + result.write(item + '\t' + '\t'.join(str(i) for i in count_superpop[guide][superpop][item]) + '\n') + except: + result.write(item + '\t' + '\t'.join(str(i) for i in [0]*10) + '\n') + + +#Write sumref for barplot for targets in top1 form of var/ref search +if summary_barplot_from_total: + with open(outputFile + '.sumref.Annotation.summary.txt', 'w+') as result: + result.write('-Summary_Total\n') + result.write('targets'+'\t'+'\t'.join(str(i - count_unique['targets'][pos]) for pos,i in enumerate(totalDict['targets'])) + '\n') + for elem in sorted(annotationsSet, key = lambda s : s.lower()): + result.write(str(elem)+'\t'+'\t'.join(str(i - count_unique[elem][pos]) for pos, i in enumerate(totalDict[elem]))+'\n') + for guide in count_unique_for_guide: + result.write('-Summary_' + guide + '\n') + result.write('targets' + '\t' + '\t'.join(str(i - count_unique_for_guide[guide]['targets'][pos]) for pos,i in enumerate(guideDict[guide]['targets'])) + '\n') + for annotation in sorted(annotationsSet, key = lambda s : s.lower()): + result.write(annotation + '\t' + '\t'.join(str(i - count_unique_for_guide[guide][annotation][pos]) for pos, i in enumerate(guideDict[guide][annotation])) + '\n') + 
+#SAVE SCORES# +with open( 'acfd.txt', 'w+') as res, open(sys.argv[8], 'r') as guides: + man = multiprocessing.Manager() + shared_doench = man.list() #list containing max doech for each thread + guides = guides.read().strip().split('\n') + for g in guides: + guides_dict_doench[g] = 0 + if g not in guides_dict: + guides_dict[g] = 0 + if g not in targets_for_doench: + guides_dict_doench[g] = 0 + else: + if len (targets_for_doench[g]) > SIZE_DOENCH: + jobs = [] + remaining_splits = (len(targets_for_doench[g])//SIZE_DOENCH) + 1 + for i in range ((len(targets_for_doench[g])//SIZE_DOENCH) + 1): + for thr in range (min(N_THR, remaining_splits)): + p = multiprocessing.Process(target = doenchParallel, args=(np.asarray(targets_for_doench[g][i*N_THR*SIZE_DOENCH + thr*SIZE_DOENCH : min( i*N_THR*SIZE_DOENCH + (thr+1)*SIZE_DOENCH,len(targets_for_doench[g]))]), model, shared_doench,) ) + remaining_splits -= 1 + p.start() + jobs.append(p) + for i in jobs: + i.join() + + guides_dict_doench[g] = max(shared_doench) + shared_doench = man.list() + else: + start_time = time.time() + doench_score = azimuth.model_comparison.predict(np.asarray(targets_for_doench[g]), None, None, model= model, pam_audit=False) + doench_score = [np.around(i * 100) for i in doench_score] + guides_dict_doench[g] = int(max(doench_score)) + res.write(g + '\t' + str(guides_dict[g]) + '\t' + str(guides_dict_doench[g]) + '\n') + +#Save additional values from semicommon for general guide table +with open(outputFile + '.addToGeneralTable.txt', 'w+') as add_file: + for guide in add_to_general_table: + add_file.write(guide + '\t' + '\t'.join([str(x) for x in add_to_general_table[guide]]) + '\n') + + + +if total_error > 0: + print('Skipped SNP:', total_error) +print('Annotation: Total progress 100%') +print("ANNOTATION COMPLETED IN: %s seconds" % (time.time() - start_time_total)) diff --git a/OldScripts/app_simplified.py b/OldScripts/app_simplified.py new file mode 100644 index 0000000..6c97daa --- /dev/null +++ 
b/OldScripts/app_simplified.py @@ -0,0 +1,770 @@ +import dash +from dash.dependencies import Input, Output, State +import dash_core_components as dcc +import dash_html_components as html +import dash_daq as daq +import dash_table +from dash.exceptions import PreventUpdate +from os import listdir #for getting directories +from os.path import isfile, isdir,join #for getting directories +import subprocess +import base64 #for decoding upload content +import io #for decoding upload content +import pandas as pd #for dash table +import json #for getting and saving report images list +from os import getcwd +import time #measure time for loading df table +from flask_caching import Cache #for cache of .targets or .scores +import os +import string #for job id +import random #for job id +import sys #for sys.exit() +import filecmp #check if Params files are equals + +PAGE_SIZE = 10 #number of entries in each page of the table in view report + +external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] +app = dash.Dash(__name__, external_stylesheets=external_stylesheets) +app.config['suppress_callback_exceptions'] = True #necessary if update element in a callback generated in another callback +app.css.config.serve_locally = True +app.scripts.config.serve_locally = True + +CACHE_CONFIG = { + # try 'filesystem' if you don't want to setup redis + 'CACHE_TYPE': 'filesystem', + 'CACHE_DIR': ('Cache')#os.environ.get('REDIS_URL', 'localhost:6379') +} +cache = Cache() +cache.init_app(app.server, config=CACHE_CONFIG) +app_location = os.path.dirname(os.path.abspath(__file__)) + '/' +operators = [['ge ', '>='], + ['le ', '<='], + ['lt ', '<'], + ['gt ', '>'], + ['ne ', '!='], + ['eq ', '='], + ['contains ']] #for filtering + +#Dropdown available genomes +onlydir = [f for f in listdir('Genomes') if isdir(join('Genomes', f))] +gen_dir = [] +for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + +#Dropdown available PAM +onlyfile = [f for f in listdir('pam') if 
isfile(join('pam', f))] +pam_file = [] +for pam_name in onlyfile: + pam_file.append({'label': pam_name, 'value' : pam_name}) + +#Dropdown available Variants +onlydir = [f for f in listdir('Variants') if isdir(join('Variants', f))] +var_dir = [] +for dir in onlydir: + var_dir.append({'label': dir, 'value' : dir}) + +#For multipage +app.layout = html.Div([ + dcc.Location(id='url', refresh=False), + html.Div(id='page-content'), + html.P(id = 'signal', style = {'visibility':'hidden'}) +]) + +# final_list.append( +# html.Div( +# [ +# html.P('Insert Job title: ', style = {'padding-top':'5px'}), +# dcc.Input(id = 'job-name', size = '30') +# ], +# className = 'flex-job-title', +# style = {'margin':'1%', 'width':'23%'} +# ) +# ) +# final_list.append( +# html.Div( +# [ +# html.P('Insert Email: ', style = {'padding-top':'5px'}), +# dcc.Input(id = 'email', size = '30') +# ], +# className = 'flex-job-title', +# style = {'margin':'1%', 'width':'23%'} +# ) +# ) + + + +#new final_list +final_list = [] +final_list.extend([html.H1('CRISPRitz Web Application'), + html.Div(children=''' + CRISPRitz is a software package containing 5 different tools dedicated to perform predictive analysis and result assessement on CRISPR/Cas experiments. 
+ '''), + html.P()]) + +final_list.append( + html.Div( + [ + html.P(['Download the offline version here: ', html.A('InfOmics/CRISPRitz', href = 'https://github.com/InfOmics/CRISPRitz', target="_blank"), ' or ', html.A('Pinellolab/CRISPRitz', href = 'https://github.com/pinellolab/CRISPRitz', target="_blank") ]) + ] + ) +) + +final_list.append( + html.Div( + html.Div( + [ + html.Div( + [ + html.H3('STEP 1', style = {'margin-top':'0'}), + html.P('Select a genome'), + html.Div( + dcc.Dropdown(options = gen_dir, clearable = False, id = "available-genome", style = {'width':'75%'}), + #style = {'width':'50%'} + ), + html.P('Add a genome variant'), + html.Div( + dcc.Dropdown(options = var_dir, clearable = False, id = 'available-variant', style = {'width':'75%'}) + ), + html.Div( + [ + html.Div( + [ + html.P('Select PAM'), + html.Div( + dcc.Dropdown(options = pam_file, clearable = False, id = 'available-pam', style = {'width':'75%'}) + ) + ], + style = {'flex':'0 0 50%'} + ), + #html.P('or'), + html.Div( + [ + html.P('Insert custom PAM'), + dcc.Input(type = 'text', id = 'custom-pam', placeholder = 'NGG') + ] + ) + ], + id = 'div-pam', + className = 'flex-div-pam' + ) + ], + id = 'step1', + style = {'flex':'0 0 40%'} + ), + html.Div(style = {'border-right':'solid 1px white'}), + html.Div( + [ + html.H3('STEP 2', style = {'margin-top':'0'}), + html.Div( + [ + html.Div( + [ + + html.Div( + [ + html.P(['Insert crRNA sequence(s)', html.Abbr('\uD83D\uDEC8', style = {'text-decoration':'none'} ,title = 'One sequence per line. 
All sequences must have the same lenght and PAM characters are not required')], style = {'word-wrap': 'break-word'}), + + dcc.Textarea(id = 'text-guides', placeholder = 'GAGTCCGAGCAGAAGAAGAA\nCCATCGGTGGCCGTTTGCCC', style = {'width':'275px', 'height':'160px'}), + html.P('or', style = {'position': 'relative', 'left':'50%'}), + html.Div( + [ + html.Div( + [ + dcc.Upload('Upload file with crRNA sequences', id = 'upload-guides') + ], + style={ + 'width': '100%', + 'height': '60px', + 'lineHeight': '60px', + 'borderWidth': '1px', + 'borderStyle': 'dashed', + 'borderRadius': '5px', + 'textAlign': 'center', + #'margin': '10px' + } + ), + html.P('', id = 'uploaded-filename',style = {'width':'275px', 'visibility':'hidden', 'display':'inline-block'}) + ], + style = {'text-align':'center'} + ) + ], + style = {'width':'275px'} #same as text-area + ) + ], + id = 'div-guides' + ), + html.Div( + [ + html.P('Allowed mismatches'), + dcc.Input(value = '0', id = 'mms', type = 'number', min = '0', style = {'width':'60px'}), + html.P('Bulge DNA size'), + dcc.Input(value = '0', id = 'dna', type = 'number', min = '0', style = {'width':'60px'}), + html.P('Bulge RNA size'), + dcc.Input(value = '0', id = 'rna', type = 'number', min = '0', style = {'width':'60px'}) + ] + ) + ], + className = 'flex-step2' + ) + + ], + id = 'step2', + style = {'flex':'0 0 40%'} + + ), + html.Div(style = {'border-right':'solid 1px white'}), + html.Div( + [ + html.H3('Submit', style = {'margin-top':'0'}), + html.Div( + [ + html.Button('Submit', id = 'submit-job') + ], + style = {'display':'inline-block', 'margin':'0 auto'} + ) + ], + id = 'step3', + style = {'tex-align':'center'} + ) + ], + id = 'div-steps', + style = {'margin':'1%'}, + className = 'flex-div-steps' + ), + style = {'background-color':'rgba(154, 208, 150, 0.39)', 'border-radius': '10px', 'border':'1px solid black'}, + id = 'steps-background' + ) +) +index_page = html.Div(final_list, style = {'margin':'1%'}) + +#Load Page +final_list = [] 
+final_list.append(html.H1('CRISPRitz Web Application')) +final_list.append( + html.Div( + html.Div( + html.Div( + [ + html.P('Job submitted. Copy this link to view the status and the result page '), + html.Div( + html.P('link', id = 'job-link'), + style = {'border':'2px solid', 'border-color':'blue' ,'width':'70%','display':'inline-block', 'margin':'5px'} + ) + ], + style = {'display':'inline-block'} + ), + style = {'display':'inline-block','background-color':'rgba(154, 208, 150, 0.39)', 'border-radius': '10px', 'border':'1px solid black', 'width':'70%'} + ), + style = {'text-align':'center'} + ) +) + +final_list.append( + html.Div( + [ + html.H4('Status report'), + html.Div( + [ + html.Div( + html.Ul( + [ + html.Li('Adding variants'), + html.Li('Searching crRNA'), + html.Li('Generating report') + ] + ), + style = {'flex':'0 0 20%'} + ), + html.Div( + html.Ul( + [ + html.Li('To do', style = {'color':'red'}, id = 'add-variants-status'), + html.Li('To do', style = {'color':'red'}, id = 'search-status'), + html.Li('To do', style = {'color':'red'}, id = 'generate-report-status') + ], + style = {'list-style-type':'none'} + ) + ) + ], + className = 'flex-status' + ), + html.Div( + dcc.Link('View Results', style = {'visibility':'hidden'}, id = 'view-results') + ) + ], + id = 'div-status-report' + ) +) + +final_list.append(html.P('', id = 'done')) + +final_list.append(dcc.Interval(id = 'load-page-check', interval=3*1000)) +load_page = html.Div(final_list, style = {'margin':'1%'}) + +#Result page +final_list = [] +final_list.append(html.H1('CRISPRitz Web Application')) + +col_list = ['BulgeType', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Direction', 'Mismatches', 'BulgeSize', 'CFD', 'Doench2016'] +col_type = ['text','text','text','text','numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric'] +cols = [{"name": i, "id": i, 'type':t} for i,t in zip(col_list, col_type)] +final_list.append( + html.Div( + dash_table.DataTable( + id='result-table', + 
columns=cols, + virtualization = True, + fixed_rows={ 'headers': True, 'data': 0 }, + style_cell={'width': '150px'}, + page_current=0, + page_size=PAGE_SIZE, + page_action='custom', + sort_action='custom', + sort_mode='multi', + sort_by=[], + filter_action='custom', + filter_query='' + ), + id = 'div-result-table' + ) +) + +final_list.append(html.Br()) +final_list.append( + html.Div( + [ + dash_table.DataTable( + id = 'guide-table', + columns = [{'name':'Available Guides', 'id':'Guides', 'type':'text'}], + page_size=PAGE_SIZE + + ), + html.Div( + [ + html.P('Select the mismatch value'), + dcc.Dropdown(id = 'mms-dropdown', style = {'flex':'0 0 5%'}, clearable = False) + ] + ), + html.Div( + html.A( + html.Img(id = 'radar-img', width="100%", #height="30%", + + ), + + target="_blank", + id = 'link-radar' + + ), + style = {'flex':'0 0 30%'} + ), + html.Div( + html.A( + html.Img(id = 'barplot-img', width="100%", #height="30%", + + ), + + target="_blank", + id = 'link-barplot' + + ), + style = {'flex':'0 0 30%'} + ) + + ], + className = 'flex-view-images' + ) +) + +result_page = html.Div(final_list, style = {'margin':'1%'}) +##################################################CALLBACKS################################################## + +#Submit Job, change url +@app.callback( + [Output('url', 'pathname'), + Output('url','search')], + [Input('submit-job', 'n_clicks')], + [State('url', 'href'), + State('available-genome', 'value'), + State('available-variant', 'value'), + State('available-pam','value'), + State('custom-pam','value'), + State('text-guides', 'value'), + State('upload-guides','contents'), + State('mms','value'), + State('dna','value'), + State('rna','value')] +) +def changeUrl(n, href, genome_ref, variant, pam, custom_pam, text_guides, file_guides, mms, dna, rna): #NOTE startJob + if n is None: + raise PreventUpdate + + #TODO check se input è corretto + + + job_id = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 10)) + result_dir = 
'Results/' + job_id + subprocess.run(['mkdir ' + result_dir], shell = True) + + enrich = True + index = True + search_index = True + search = True + annotation = True + report = True + + #Check which tool to use + #TODO controllare se add-variants mi crea una cartella all'interno di SNP genomes, nel caso fare in modo che lo faccia + + #Enrichment + if variant is None: + variant = 'None' + enrich = False + genome_enr = genome_ref + else: + all_genome_enr = [f for f in listdir('variants_genome/SNPs_genome') if isdir(join('variants_genome/SNPs_genome', f))] + if variant is 'None': + genome_enr = genome_ref + else: + genome_enr = genome_ref + '_' + variant + if (genome_ref + '_' + variant in all_genome_enr): #NOTE Enriched genomes dir names are genome_ref name + symbol + variant_dir name + enrich = False + + #subprocess.run(['crispritz.py add-variants ' + 'Variants/' + variant + ' ' + 'Genomes/' + genome_ref], shell = True) + #Indexing and search + #NOTE Indexed genomes names are PAM + _ + bMax + _ + genome_ref/genome_enr + all_genomes_idx = [f for f in listdir('genome_library') if isdir(join('genome_library', f))] + + #TODO if pam input area, find way to select left or right of guide + pam_len = 0 + if custom_pam is not None and custom_pam is not '': + pam_char = custom_pam + pam_len = len(pam_char) + #Save to file as NNNN...PAM, but first check what guides have been inserted + if text_guides is not None and text_guides is not '': + n_number = len(text_guides.split('\n')[0]) + else: + decoded_guides = parse_contents(file_guides).decode('utf-8') + n_number = len(decoded_guides.split('\n')[0]) - decoded_guides.split('\n')[0].count('N') + with open(result_dir + '/pam.txt', 'w') as pam_file: + pam_file.write(('N' * n_number) + pam_char) + pam = result_dir + '/pam.txt' + else: + with open('pam/' + pam) as pam_file: + pam_char = pam_file.readline() + + if int(pam_char.split(' ')[-1]) < 0: + end_idx = int(pam_char.split(' ')[-1]) * (-1) + pam_char = pam_char.split(' ')[0][0 
: end_idx] + pam_len = end_idx + else: + end_idx = int(pam_char.split(' ')[-1]) + pam_char = pam_char.split(' ')[0][end_idx * (-1):] + pam_len = end_idx + subprocess.run(['cp pam/' + pam + ' ' + result_dir + '/pam.txt'], shell = True) + pam = result_dir + '/pam.txt' + + guides_file = result_dir + '/guides.txt' + if text_guides is not None and text_guides is not '': + save_guides_file = open(result_dir + '/guides.txt', 'w') + text_guides = text_guides.replace('\n', 'N' * pam_len + '\n') + 'N' * pam_len #TODO what if pam at beginning? + save_guides_file.write(text_guides) + save_guides_file.close() + else: + decoded_guides = parse_contents(file_guides).decode('utf-8') + save_guides_file = open(result_dir + '/guides.txt', 'w') + save_guides_file.write(decoded_guides) + save_guides_file.close() + + if (int(dna) == 0 and int(rna) == 0): + index = False + search_index = False + max_bulges = rna + if (int(dna) > int(rna)): + max_bulges = dna + if (index and (pam_char + '_' + str(max_bulges) + '_' + genome_ref + '_' + variant) in all_genomes_idx): + index = False + + if (search_index): + search = False + # else: + # search_index = False + + if variant is 'None': + genome_idx = pam_char + '_' + str(max_bulges) + '_' + genome_ref + else: + genome_idx = pam_char + '_' + str(max_bulges) + '_' + genome_ref + '_' + variant + + #Create Params.txt file + with open(result_dir + '/Params.txt', 'w') as p: + p.write('Genome_ref\t' + genome_enr + '\n') + if search_index: + p.write('Genome_idx\t' + genome_idx + '\n') + else: + p.write('Genome_idx\t' + 'None\n') + p.write('Variant\t' + str(variant) + '\n') + p.write('Pam\t' + pam_char + '\n') + p.write('Max_bulges\t' + str(max_bulges) + '\n') + p.write('Mismatches\t' + str(mms) + '\n') + p.write('DNA\t' + str(dna) + '\n') + p.write('RNA\t' + str(rna) + '\n') + p.close() + + + + #Check if input parameters (mms, bulges, pam, guides, genome) are the same as a previous search + all_result_dirs = [f for f in listdir('Results') if 
isdir(join('Results', f))] + all_result_dirs.remove(job_id) + for check_param_dir in all_result_dirs: + if os.path.exists('Results/' + check_param_dir + '/Params.txt'): + print('checkparamdir:', check_param_dir) + if (filecmp.cmp('Results/' + check_param_dir + '/Params.txt', result_dir + '/Params.txt' )): + search = False + search_index = False + subprocess.run(['ln -s $PWD/Results/' + check_param_dir + '/' + check_param_dir + '* ' + result_dir + '/'], shell = True) #TODO copy result from one directory to the current one or create simlink + subprocess.run(['ln -s $PWD/Results/' + check_param_dir + '/*.png ' + result_dir + '/'], shell = True) + subprocess.run(['rename \'s/' + check_param_dir + '/' + job_id + '/g\' ' + result_dir + '/*'], shell = True) + break #BUG manca il controllo sulle guide + #Annotation + if (not search and not search_index): + annotation = False #TODO copy result from one directory to the current one or create simlink + + #Generate report + if (not enrich and not index and not search and not search_index): + report = False #TODO copy result from one directory to the current one or create simlink + #TODO if human genome -> annotation = human genome. 
mouse -> annotation mouse etc + subprocess.Popen(['assets/./submit_job.sh ' + 'Results/' + job_id + ' ' + str(variant) + ' ' + 'Genomes/' + genome_ref + ' ' + 'variants_genome/SNPs_genome/' + genome_enr + ' ' + 'genome_library/' + genome_idx + ( + ' ' + pam + ' ' + guides_file + ' ' + str(mms) + ' ' + str(dna) + ' ' + str(rna) + ' ' + str(enrich) + ' ' + str(index) + ' ' + str(search_index) + ' ' + str(search) + ( + ' ' + str(annotation) + ' ' + str(report))) ], shell = True) + return '/load','?job=' + job_id + +#When url changed, load new page +@app.callback( + [Output('page-content', 'children'), + Output('job-link', 'children')], + [Input('url', 'pathname')], + [State('url','href'), + State('url','search') + ] +) +def changePage(path, href, search): + + if path == '/load': + return load_page, 'http://127.0.0.1:8050/load' + search #NOTE change the url part when DNS are changed + if path == '/result': + return result_page, 'http://127.0.0.1:8050/load' + search + return index_page, '' + +#Check end job +@app.callback( + [Output('view-results', 'style'), + Output('add-variants-status', 'children'), + Output('search-status', 'children'), + Output('generate-report-status', 'children'), + Output('view-results','href')], + [Input('load-page-check', 'n_intervals')], + [State('url', 'search')] +) +def refreshSearch(n, dir_name): + if n is None: + raise PreventUpdate #TODO fa un controllo subito, così l'utente non deve aspettare 3 secondi per l'update + + onlydir = [f for f in listdir('Results') if isdir(join('Results', f))] + current_job_dir = 'Results/' + dir_name.split('=')[-1] + '/' + if dir_name.split('=')[-1] in onlydir: + onlyfile = [f for f in listdir(current_job_dir) if isfile(join(current_job_dir, f))] + if 'log.txt' in onlyfile: + with open(current_job_dir + 'log.txt') as log: + all_done = 0 + add_var_status = html.P('To do', style = {'color':'red'}) + search_status = html.P('To do', style = {'color':'red'}) + report_status = html.P('To do', style = 
{'color':'red'}) + current_log = log.read() + if ('Add-variants\tDone' in current_log): + add_var_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if ('Search-index\tDone' in current_log or 'Search\tDone' in current_log): + search_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if ('Report\tDone' in current_log): + report_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if all_done == 3: + return {'visibility':'visible'}, add_var_status, search_status, report_status, '/result?job=' + dir_name.split('=')[-1] + else: + return {'visibility':'hidden'}, add_var_status, search_status, report_status,'' + raise PreventUpdate + +#Perform expensive loading of a dataframe and save result into 'global store' +#Cache are in the Cache directory +@cache.memoize() +def global_store(value): + + if value is None: + return '' + target = [f for f in listdir('Results/' + value) if isfile(join('Results/'+value, f)) and f.endswith('scores.txt') ] + if not target: + target = [f for f in listdir('Results/' + value) if isfile(join('Results/'+value, f)) and f.endswith('targets.txt') ] + + df = pd.read_csv('Results/' +value + '/' + target[0], sep = '\t') + df.rename(columns = {"#Bulge type":'BulgeType', '#Bulge_type':'BulgeType','Bulge Size': 'BulgeSize', 'Bulge_Size': 'BulgeSize', 'Doench 2016':'Doench2016','Doench_2016':'Doench2016'}, inplace = True) + return df + +#Callback to populate the tab, note that it's called when the result_page is loaded (dash implementation), so we do not use raise update to block this first callback +@app.callback( + [Output('signal','children'), + Output('result-table','page_current'), + Output('result-table', "sort_by"), + Output('result-table','filter_query')], + [Input('url', 'pathname')], + [State('url', 'search')] +) +def populateTable(pathname, search): + if pathname != '/result': + raise PreventUpdate + + job_id = search.split('=')[-1] + job_directory = 'Results/' 
+ job_id + '/' + #print('JOB_ID: ', job_id) + global_store(job_id) + return job_id, 0, [], '' + +#Send the data when next or prev button is clicked on the result table +@app.callback( + [Output('result-table', 'data'), + Output('guide-table', 'data'), + Output('mms-dropdown','options')], + [Input('signal', 'children'), + Input('result-table', "page_current"), + Input('result-table', "page_size"), + Input('result-table', "sort_by"), + Input('result-table', 'filter_query')] +) +def update_table(value, page_current,page_size, sort_by, filter): + #print('signal_children', value) + if value is None: + raise PreventUpdate + + + filtering_expressions = filter.split(' && ') + df = global_store(value) + dff = df + for filter_part in filtering_expressions: + col_name, operator, filter_value = split_filter_part(filter_part) + + if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): + # these operators match pandas series operator method names + dff = dff.loc[getattr(dff[col_name], operator)(filter_value)] + elif operator == 'contains': + dff = dff.loc[dff[col_name].str.contains(filter_value)] + elif operator == 'datestartswith': + # this is a simplification of the front-end filtering logic, + # only works with complete fields in standard format + dff = dff.loc[dff[col_name].str.startswith(filter_value)] + + if len(sort_by): + dff = dff.sort_values( + [col['column_id'] for col in sort_by], + ascending=[ + col['direction'] == 'asc' + for col in sort_by + ], + inplace=False + ) + + #Load guide table + df_guide = pd.read_csv('Results/' + value + '/guides.txt', names = ['Guides']) + + #Load mismatches + with open('Results/' + value + '/Params.txt') as p: + + mms = (next(s for s in p.read().split('\n') if 'Mismatches' in s)).split('\t')[-1] + mms = int(mms[0]) + mms_values = [{'label':i, 'value':i} for i in range(mms + 1) ] + + return dff.iloc[ + page_current*page_size:(page_current+ 1)*page_size + ].to_dict('records'), df_guide.to_dict('records'), mms_values + +#For filtering +def 
split_filter_part(filter_part): + for operator_type in operators: + for operator in operator_type: + if operator in filter_part: + name_part, value_part = filter_part.split(operator, 1) + name = name_part[name_part.find('{') + 1: name_part.rfind('}')] + + value_part = value_part.strip() + v0 = value_part[0] + if (v0 == value_part[-1] and v0 in ("'", '"', '`')): + value = value_part[1: -1].replace('\\' + v0, v0) + else: + try: + value = float(value_part) + except ValueError: + value = value_part + + # word operators need spaces after them in the filter string, + # but we don't want these later + return name, operator_type[0].strip(), value + + return [None] * 3 + + +#Read the uploaded file and converts into bit +def parse_contents(contents): + content_type, content_string = contents.split(',') + + decoded = base64.b64decode(content_string) + return decoded + +#Show images +@app.callback( + [Output('barplot-img', 'src'), + Output('link-barplot', 'href'), + Output('radar-img','src'), + Output('link-radar','href')], + [Input('guide-table','selected_cells'), + Input('mms-dropdown','value')], + [State('guide-table', 'data'), + State('url','search')] +) +def testcel(sel_cel, mms, all_guides, job_id): + if sel_cel is None or mms is None: + raise PreventUpdate + job_id = job_id.split('=')[-1] + barplot_img = 'summary_histogram_' + str(mms) + 'mm.png' + try: #NOTE serve per non generare errori se il barplot non è stato fatto + barplot_src = 'data:image/png;base64,{}'.format(base64.b64encode(open('Results/' + job_id + '/' + barplot_img, 'rb').read()).decode()) + barplot_href = 'assets/Img/' + job_id + '/' + barplot_img + except: + barplot_src = '' + barplot_href = '' + guide = all_guides[int(sel_cel[0]['row'])]['Guides'] + radar_img = 'summary_single_guide_' + guide + '_' + str(mms) + 'mm.png' + radar_src = 'data:image/png;base64,{}'.format(base64.b64encode(open('Results/' + job_id + '/' + radar_img, 'rb').read()).decode()) + radar_href = 'assets/Img/' + job_id + '/' + 
radar_img + return barplot_src, barplot_href, radar_src, radar_href + +#Show filename if user upload a file +@app.callback( + [Output('uploaded-filename', 'children'), + Output('uploaded-filename', 'style')], + [Input('upload-guides', 'filename')] +) +def showUploadedFilename(name): + if name is None: + raise PreventUpdate + return 'Uploaded file: ' + name, {'visibility':'visible'} + +if __name__ == '__main__': + app.run_server(debug=True) + cache.clear() #delete cache when server is closed + + #TODO se faccio l'annotazione (stessi parametri) dei targets ottenuti da enr e ref genomes, poi posso usare i loro summary counts per fare il barplot, che dipende solo dai mm e non dalle guide + #BUG quando faccio scores, se ho dei char IUPAC nei targets, nel terminale posso vedere 150% 200% etc perche' il limite massimo e' basato su wc -l dei targets, ma possono aumentare se ho molti + #Iupac + #BUG emx1.txt error on loading extended_profile + + + #TODO bootstrap per fare il menu, aggiungere selezione per gecko e il barplot, inserire email quando finito, magari un menu avanzate per opzioni avanzate, divisione tra genomi e genomi enr (al posto + # della select varfile), cambiare il nome delle pam togliendo txt e mettendo nome giusto \ No newline at end of file diff --git a/OldScripts/app_simplified_bootstrap.py b/OldScripts/app_simplified_bootstrap.py new file mode 100644 index 0000000..ba11942 --- /dev/null +++ b/OldScripts/app_simplified_bootstrap.py @@ -0,0 +1,1061 @@ +import dash +from dash.dependencies import Input, Output, State +import dash_core_components as dcc +import dash_html_components as html +import dash_daq as daq +import dash_table +from dash.exceptions import PreventUpdate +from os import listdir #for getting directories +from os.path import isfile, isdir,join #for getting directories +import subprocess +import base64 #for decoding upload content +import io #for decoding upload content +import pandas as pd #for dash table +import json #for getting and 
saving report images list +from os import getcwd +import time #measure time for loading df table +from flask_caching import Cache #for cache of .targets or .scores +import os +import string #for job id +import random #for job id +import sys #for sys.exit() +import filecmp #check if Params files are equals +import dash_bootstrap_components as dbc +import collections #For check if guides are the same in two results +from datetime import datetime #For time when job submitted + +PAGE_SIZE = 10 #number of entries in each page of the table in view report + +external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css', dbc.themes.BOOTSTRAP] +app = dash.Dash(__name__, external_stylesheets=external_stylesheets) + +app.config['suppress_callback_exceptions'] = True #necessary if update element in a callback generated in another callback +app.css.config.serve_locally = True +app.scripts.config.serve_locally = True + +CACHE_CONFIG = { + # try 'filesystem' if you don't want to setup redis + 'CACHE_TYPE': 'filesystem', + 'CACHE_DIR': ('Cache')#os.environ.get('REDIS_URL', 'localhost:6379') +} +cache = Cache() +cache.init_app(app.server, config=CACHE_CONFIG) +app_location = os.path.dirname(os.path.abspath(__file__)) + '/' +operators = [['ge ', '>='], + ['le ', '<='], + ['lt ', '<'], + ['gt ', '>'], + ['ne ', '!='], + ['eq ', '='], + ['contains ']] #for filtering + +#Dropdown available genomes +onlydir = [f for f in listdir('Genomes') if isdir(join('Genomes', f))] +onlydir = [x.replace('_', ' ') for x in onlydir] +gen_dir = [] +for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + +#Dropdown available PAM +onlyfile = [f for f in listdir('pam') if isfile(join('pam', f))] +onlyfile = [x.replace('.txt', '') for x in onlyfile] #removed .txt for better visualization +pam_file = [] +for pam_name in onlyfile: + if 'NGG' in pam_name: + pam_file.append({'label':pam_name, 'value':pam_name}) + else: + pam_file.append({'label': pam_name, 'value' : pam_name, 
'disabled':True}) + +#Dropdown available Variants +onlydir = [f for f in listdir('Variants') if isdir(join('Variants', f))] +var_dir = [] +for dir in onlydir: + var_dir.append({'label': dir, 'value' : dir}) + +#Available mismatches and bulges +av_mismatches = [{'label': i, 'value': i} for i in range(0, 8)] +av_bulges = [{'label': i, 'value': i} for i in range(0, 6)] +search_bar = dbc.Row( + [ + #dbc.Col(dbc.Input(type="search", placeholder="Search")), + dbc.Col(dbc.NavLink('HOME', active = True, href = 'http://127.0.0.1:8050', className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})), + dbc.Col(dbc.NavLink('ABOUT', active = True, href = 'http://127.0.0.1:8050', className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})), + dbc.Col( + dbc.DropdownMenu( + children=[ + dbc.DropdownMenuItem("Github", header=True), + dbc.DropdownMenuItem("InfOmics/CRISPRitz", href='https://github.com/InfOmics/CRISPRitz'), + dbc.DropdownMenuItem("Pinellolab/CRISPRitz", href='https://github.com/pinellolab/CRISPRitz'), + ], + #nav=True, + in_navbar=True, + label="Downloads", + style = {'width': '300px !important' } #'height': '400px !important' + ), + ), + dbc.Col(dbc.NavLink('CONTACTS', active = True, href = 'http://127.0.0.1:8050', className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})) + ], + no_gutters=True, + className="ml-auto flex-nowrap mt-3 mt-md-0", + align="center", +) +PLOTLY_LOGO = "https://images.plot.ly/logo/new-branding/plotly-logomark.png" + + +navbar = dbc.Navbar( + [ + html.A( + # Use row and col to control vertical alignment of logo / brand + dbc.Row( + [ + dbc.Col(html.Img(src=PLOTLY_LOGO, height="30px")), + dbc.Col(dbc.NavbarBrand("CRISPRitz Web App", className="ml-2", style = {'font-size': '30px'})) + ], + align="center", + no_gutters=True, + ), + href='http://127.0.0.1:8050', + ), + dbc.NavbarToggler(id="navbar-toggler"), + 
dbc.Collapse(search_bar, id="navbar-collapse", navbar=True), + ], + color="dark", + dark=True, +) + +#For multipage +app.layout = html.Div([ + navbar, + dcc.Location(id='url', refresh=False), + html.Div(id='page-content'), + html.P(id = 'signal', style = {'visibility':'hidden'}) +]) + + + +#new final_list +final_list = [] +final_list.extend([#html.H1('CRISPRitz Web Application'), + html.Div(children=''' + CRISPRitz is a software package containing 5 different tools dedicated to perform predictive analysis and result assessement on CRISPR/Cas experiments. + '''), + html.P()]) + +final_list.append( + html.Div( + [ + html.P(['Download the offline version here: ', html.A('InfOmics/CRISPRitz', href = 'https://github.com/InfOmics/CRISPRitz', target="_blank"), ' or ', html.A('Pinellolab/CRISPRitz', href = 'https://github.com/pinellolab/CRISPRitz', target="_blank") ]) + ] + ) +) +checklist_div = html.Div( + [ + dbc.FormGroup( + [ + dbc.Checkbox( + id="checkbox-gecko", className="form-check-input" + ), + dbc.Label( + #html.P(['Activate Gecko ', html.Abbr('comparison', title ='The results of your test guides will be compared with results obtained from a previous computed analysis on gecko library')]) , + html.P('Compare your results with the Gecko library'), + html_for="checkbox-gecko", + className="form-check-label", + ), + dbc.Checkbox( + id="checkbox-ref-comp", className="form-check-input" + ), + dbc.Label( + #html.P(['Activate Reference genome ', html.Abbr('comparison', title ='The results of your test guides will be compared with the results obtained from a computed analysis on the corresponding reference genome. 
Note: this may increase computational time')]) , + html.P('Compare your results with the corresponding reference genome'), + html_for="checkbox-ref-comp", + className="form-check-label", + ), + # dbc.Checkbox( + # id="checkbox-email", className="form-check-input" + # ), + # dbc.Label( + # 'Notify me by email', + # html_for="checkbox-email", + # className="form-check-label", + # ) + ], + check = True + ) + ], + id = 'checklist-test-div' +) + +modal = html.Div( + [ + dbc.Modal( + [ + dbc.ModalHeader("WARNING! Missing inputs"), + dbc.ModalBody('The following inputs are missing, please select values before submitting the job', id = 'warning-list'), + dbc.ModalFooter( + dbc.Button("Close", id="close" , className="modal-button") + ), + ], + id="modal", + centered=True + ), + ] +) + +final_list.append( + html.Div( + html.Div( + [ + modal, + html.Div( + [ + html.H3('STEP 1', style = {'margin-top':'0'}), + html.P('Select a genome'), + html.Div( + dcc.Dropdown(options = gen_dir, clearable = False, id = "available-genome",) #style = {'width':'75%'}) + ), + dbc.FormText('Note: Genomes enriched with variants are indicated with a \'+\' symbol', color='secondary'), + # html.P('Add a genome variant', style = {'visibility':'hidden'}), + # html.Div( + # dcc.Dropdown(options = var_dir,clearable = False, id = 'available-variant', style = {'width':'75%', 'visibility':'hidden'}) + # ), + html.Div( + [ + html.Div( + [ + html.P('Select PAM'), + html.Div( + dcc.Dropdown(options = pam_file, clearable = False, id = 'available-pam') + ) + ], + style = {'flex':'0 0 50%', 'margin-top': '10%'} + ), + #html.P('or'), + # html.Div( + # [ + # html.P('Insert custom PAM'), + # dcc.Input(type = 'text', id = 'custom-pam', placeholder = 'NGG', disabled = True) + # ] + # ) + ], + id = 'div-pam', + className = 'flex-div-pam' + ), + html.Div( + [ + # html.P(#'Send us a request to add a specific genome sequence or a variant, or download the offline version' + # [html.A('Contact us', href = 
'http://127.0.0.1:8050', target="_blank"),' to request new genomes availability in the dropdown list', html.P('or'), html.P('Download the offline version'),], style = {'margin-top':'10px', 'text-align':'-webkit-center', 'position': 'relative', 'top': '25%'}), + html.Ul( + [html.Li( + [html.A('Contact us', href = 'http://127.0.0.1:8050', target="_blank"),' to request new genomes availability in the dropdown list'], + style = {'margin-top':'5%'} + ), + html.Li( + [html.A('Download', href = 'https://github.com/InfOmics/CRISPRitz'), ' the offline version for more custom parameters'] + ) + ], + style = {'list-style':'inside'} + ) + ], + style = {'height':'50%'} + ), + ], + id = 'step1', + style = {'flex':'0 0 30%', 'tex-align':'center'} + ), + html.Div(style = {'border-right':'solid 1px white'}), + html.Div( + [ + html.H3('STEP 2', style = {'margin-top':'0'}), + html.Div( + [ + html.Div( + [ + + html.Div( + [ + html.P([ + 'Insert crRNA sequence(s), one per line.', + html.P('Sequences must have the same length and be provided without the PAM sequence') , + #html.Abbr('\uD83D\uDEC8', style = {'text-decoration':'none'} ,title = 'One sequence per line. 
All sequences must have the same lenght and PAM characters are not required') + ], + style = {'word-wrap': 'break-word'}), + + dcc.Textarea(id = 'text-guides', placeholder = 'GAGTCCGAGCAGAAGAAGAA\nCCATCGGTGGCCGTTTGCCC', style = {'width':'450px', 'height':'160px'}), + #html.P('Note: a maximum number of 1000 sequences can be provided'), + dbc.FormText('Note: a maximum number of 1000 sequences can be provided', color = 'secondary') + ], + style = {'width':'450px'} #same as text-area + ) + ], + id = 'div-guides' + ), + html.Div( + [ + html.P('Allowed mismatches'), + dcc.Dropdown(options = av_mismatches, clearable = False, id = 'mms', style = {'width':'60px'}), + html.P('Bulge DNA size'), + dcc.Dropdown(options = av_bulges, clearable = False, id = 'dna', style = {'width':'60px'}), + html.P('Bulge RNA size'), + dcc.Dropdown(options = av_bulges, clearable = False, id = 'rna', style = {'width':'60px'}) + ] + ) + ], + className = 'flex-step2' + ) + + ], + id = 'step2', + style = {'flex':'0 0 40%'} + + ), + html.Div(style = {'border-right':'solid 1px white'}), + html.Div( + [ + html.H3('Advanced Options'), + checklist_div, + dcc.Checklist( + options = [ + #{'label':'Gecko comparison', 'value':'GC', 'disabled':False}, + #{'label':'Reference genome comparison', 'value':'RGC', 'disabled':False}, + {'label':'Notify me by email','value':'email', 'disabled':False}], + id = 'checklist-advanced', + ), + dbc.Fade( + [ + dbc.FormGroup( + [ + dbc.Label("Email", html_for="example-email"), + dbc.Input(type="email", id="example-email", placeholder="Enter email", className='exampleEmail'), + # dbc.FormText( + # "Are you on email? 
You simply have to be these days", + # color="secondary", + # ), + ] + ) + ], + id = 'fade', is_in= False, appear= False + ), + #html.H3('Submit', style = {'margin-top':'0'}), + html.Div( + [ + html.Button('Submit', id = 'check-job'), + html.Button('', id = 'submit-job', style = {'visibility':'hidden'}) + ], + style = {'display':'inline-block', 'margin':'0 auto'} #style="height:55px; width:150px" + ) + ], + id = 'step3', + style = {'tex-align':'center'}, + className = 'flex-step3' + ) + ], + id = 'div-steps', + style = {'margin':'1%'}, + className = 'flex-div-steps' + ), + style = {'background-color':'rgba(154, 208, 150, 0.39)', 'border-radius': '10px', 'border':'1px solid black'}, + id = 'steps-background' + ) +) +index_page = html.Div(final_list, style = {'margin':'1%'}) + +#Load Page +final_list = [] +#final_list.append(html.H1('CRISPRitz Web Application')) +final_list.append( + html.Div( + html.Div( + html.Div( + [ + html.P('Job submitted. Copy this link to view the status and the result page '), + html.Div( + html.P('link', id = 'job-link'), + style = {'border':'2px solid', 'border-color':'blue' ,'width':'70%','display':'inline-block', 'margin':'5px'} + ) + ], + style = {'display':'inline-block'} + ), + style = {'display':'inline-block','background-color':'rgba(154, 208, 150, 0.39)', 'border-radius': '10px', 'border':'1px solid black', 'width':'70%'} + ), + style = {'text-align':'center'} + ) +) + +final_list.append( + html.Div( + [ + html.H4('Status report'), + html.Div( + [ + html.Div( + html.Ul( + [ + html.Li('Searching crRNA'), + html.Li('Annotating result'), + html.Li('Generating report') + ] + ), + style = {'flex':'0 0 20%'} + ), + html.Div( + html.Ul( + [ + html.Li('To do', style = {'color':'red'}, id = 'search-status'), + html.Li('To do', style = {'color':'red'}, id = 'annotate-result-status'), + html.Li('To do', style = {'color':'red'}, id = 'generate-report-status') + ], + style = {'list-style-type':'none'} + ) + ) + ], + className = 'flex-status' + 
), + html.Div( + [ + dcc.Link('View Results', style = {'visibility':'hidden'}, id = 'view-results'), + html.Div(id = 'no-directory-error') + ] + ) + ], + id = 'div-status-report' + ) +) + +final_list.append(html.P('', id = 'done')) + +final_list.append(dcc.Interval(id = 'load-page-check', interval=3*1000)) +load_page = html.Div(final_list, style = {'margin':'1%'}) + +#Result page +final_list = [] +#final_list.append(html.H1('CRISPRitz Web Application')) +final_list.append(html.Div(id='warning-div')) +col_list = ['BulgeType', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Direction', 'Mismatches', 'BulgeSize', 'CFD', 'Doench2016'] +col_type = ['text','text','text','text','numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric'] +cols = [{"name": i, "id": i, 'type':t} for i,t in zip(col_list, col_type)] +final_list.append( + html.Div( + dash_table.DataTable( + id='result-table', + columns=cols, + virtualization = True, + fixed_rows={ 'headers': True, 'data': 0 }, + style_cell={'width': '150px'}, + page_current=0, + page_size=PAGE_SIZE, + page_action='custom', + sort_action='custom', + sort_mode='multi', + sort_by=[], + filter_action='custom', + filter_query='' + ), + id = 'div-result-table' + ) +) + +final_list.append(html.Br()) +final_list.append( + html.Div( + [ + dash_table.DataTable( + id = 'guide-table', + columns = [{'name':'Available Guides', 'id':'Guides', 'type':'text'}], + page_size=PAGE_SIZE + + ), + html.Div( + [ + html.P('Select the mismatch value'), + dcc.Dropdown(id = 'mms-dropdown', style = {'flex':'0 0 5%'}, clearable = False) + ] + ), + html.Div( + html.A( + html.Img(id = 'radar-img', width="100%", #height="30%", + + ), + + target="_blank", + id = 'link-radar' + + ), + style = {'flex':'0 0 30%'} + ), + html.Div( + html.A( + html.Img(id = 'barplot-img', width="100%", #height="30%", + + ), + + target="_blank", + id = 'link-barplot' + + ), + style = {'flex':'0 0 30%'} + ) + + ], + className = 'flex-view-images' + ) +) + +result_page = 
html.Div(final_list, style = {'margin':'1%'}) + + +#Test bootstrap page +final_list = [] +final_list.append( + html.Div( + [ + dbc.Button("Toggle", id="alert-toggle-auto", className="mr-1"), + html.Hr(), + dbc.Alert( + "Hello! I am an auto-dismissing alert!", + id="alert-auto", + is_open=True, + duration=4000, + ), + ] +) +) + +@app.callback( + Output("alert-auto", "is_open"), + [Input("alert-toggle-auto", "n_clicks")], + [State("alert-auto", "is_open")], +) +def toggle_alert(n, is_open): + if n: + return not is_open + return is_open + +test_page = html.Div(final_list, style = {'margin':'1%'}) +##################################################CALLBACKS################################################## + +#Fade in/out email +@app.callback( + Output("fade", "is_in"), + [Input("checklist-advanced", "value")], + [State("fade", "is_in")], +) +def toggle_fade(selected_options, is_in): + if selected_options is None: + return False + if 'email' in selected_options: + return True + return False + +#Email validity +@app.callback( + Output('example-email', 'style'), + [Input('example-email', 'value')] +) +def checkEmailValidity(val): + if val is None: + raise PreventUpdate + + if '@' in val: + return {'border':'1px solid #94f033', 'outline':'0'} + return {'border':'1px solid red'} + +#Check input presence +@app.callback( + [Output('submit-job', 'n_clicks'), + Output('modal', 'is_open'), + Output('available-genome', 'className'), + Output('available-pam', 'className'), + Output('text-guides', 'style'), + Output('mms', 'className'), + Output('dna', 'className'), + Output('rna', 'className'), + Output('warning-list', 'children')], + [Input('check-job','n_clicks'), + Input('close','n_clicks')], + [State('available-genome', 'value'), + State('available-pam','value'), + State('text-guides', 'value'), + State('mms','value'), + State('dna','value'), + State('rna','value'), + State("modal", "is_open")] +) +def checkInput(n, n_close, genome_selected, pam, text_guides, mms, dna, rna, 
is_open): + if n is None: + raise PreventUpdate + if is_open is None: + is_open = False + + classname_red = 'missing-input' + genome_update = None + pam_update = None + text_update = {'width':'450px', 'height':'160px'} + mms_update = None + dna_update = None + rna_update = None + update_style = False + miss_input_list = [] + + if genome_selected is None or genome_selected is '': + genome_update = classname_red + update_style = True + miss_input_list.append('Genome') + if pam is None or pam is '': + pam_update = classname_red + update_style = True + miss_input_list.append('PAM') + if text_guides is None or text_guides is '': + text_update = {'width':'450px', 'height':'160px','border': '1px solid red'} + update_style = True + miss_input_list.append('crRNA sequence(s)') + if mms is None or str(mms) is '': + mms_update = classname_red + update_style = True + miss_input_list.append('Allowed Mismatches') + if dna is None or str(dna) is '': + dna_update = classname_red + update_style = True + miss_input_list.append('Bulge DNA size') + if rna is None or str(rna) is '': + rna_update = classname_red + update_style = True + miss_input_list.append('Bulge RNA size') + miss_input = html.Div( + [ + html.P('The following inputs are missing:'), + html.Ul([html.Li(x) for x in miss_input_list]), + html.P('Please fill in the values before submitting the job') + ] + ) + + if not update_style: + return 1, False, genome_update, pam_update, text_update, mms_update, dna_update, rna_update, miss_input + return None, not is_open, genome_update, pam_update, text_update, mms_update, dna_update, rna_update, miss_input + +#Submit Job, change url +@app.callback( + [Output('url', 'pathname'), + Output('url','search')], + [Input('submit-job','n_clicks')], + [State('url', 'href'), + State('available-genome', 'value'), + State('available-pam','value'), + State('text-guides', 'value'), + State('mms','value'), + State('dna','value'), + State('rna','value'), + State('checkbox-gecko','checked'), + 
State('checkbox-ref-comp', 'checked'), + State('checklist-advanced', 'value'), + State('example-email','value')] +) +def changeUrl(n, href, genome_selected, pam, text_guides, mms, dna, rna, gecko_opt, genome_ref_opt, adv_opts,dest_email): #NOTE startJob + ''' + genome_selected can be Human genome (hg19), or Human Genome (hg19) + 1000 Genome Project, the '+' character defines the ref or enr version. + Note that pam parameter can be 5'-NGG-3', but the corresponding filename is 5'-NGG-3'.txt + Pam file (5'-NGG-3'.txt) is structured as NGG 3, or TTTN -4. The created pam.txt inside the result directory add the corresponding N's + Annotations path file is named genome_name_annotationpath.txt, where genome_name is the reference genome name + ''' + if n is None: + raise PreventUpdate + + #Check input, else give simple input + if genome_selected is None or genome_selected is '': + genome_selected = 'hg19_ref' + if pam is None or pam is '': + pam = '5\'-NGG-3\'' + if text_guides is None or text_guides is '': + text_guides = 'GAGTCCGAGCAGAAGAAGAA' + else: + text_guides = text_guides.strip() + if len(text_guides.split('\n')) > 1000: + text_guides = '\n'.join(text_guides.split('\n')[:1000]).strip() + if ( not all(len(elem) == len(text_guides.split('\n')[0]) for elem in text_guides.split('\n'))): + text_guides = selectSameLenGuides(text_guides) + + job_id = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 10)) + result_dir = 'Results/' + job_id + subprocess.run(['mkdir ' + result_dir], shell = True) + + search_index = True + search = True + annotation = True + report = True + gecko_comp = False + ref_comparison = False + send_email = False + if adv_opts is None: + adv_opts = [] + if gecko_opt: + gecko_comp = True + if genome_ref_opt: + ref_comparison = True + if 'email' in adv_opts and dest_email is not None and len(dest_email.split('@')) > 1 and dest_email.split('@')[-1] is not '': + send_email = True + with open(result_dir + '/email.txt', 'w') as e: + 
e.write(dest_email + '\n') + e.write('http://127.0.0.1:8050/load?job=' + job_id + '\n') + e.write(datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S") + '\n') + e.write('Job done. Parameters: etc etc') + e.close() + + #Set parameters + genome_selected = genome_selected.replace(' ', '_') + genome_ref = genome_selected.split('+')[0] #+ char to separate ref and vcf, eg Human_genome+1000_genome_project + if genome_ref == genome_selected: + ref_comparison = False + #NOTE Indexed genomes names are PAM + _ + bMax + _ + genome_selected + + pam_len = 0 + custom_pam = None + + with open('pam/' + pam + '.txt') as pam_file: + pam_char = pam_file.readline() + index_pam_value = pam_char.split(' ')[-1] + if int(pam_char.split(' ')[-1]) < 0: + end_idx = int(pam_char.split(' ')[-1]) * (-1) + pam_char = pam_char.split(' ')[0][0 : end_idx] + pam_len = end_idx + pam_begin = True + else: + end_idx = int(pam_char.split(' ')[-1]) + pam_char = pam_char.split(' ')[0][end_idx * (-1):] + pam_len = end_idx + pam_begin = False + + len_guides = len(text_guides.split('\n')[0]) + if (pam_begin): + pam_to_file = pam_char + ('N' * len_guides) + ' ' + index_pam_value + else: + pam_to_file = ('N' * len_guides) + pam_char + ' ' + index_pam_value + + save_pam_file = open(result_dir + '/pam.txt', 'w') + save_pam_file.write(pam_to_file) + save_pam_file.close() + pam = result_dir + '/pam.txt' + + guides_file = result_dir + '/guides.txt' + if text_guides is not None and text_guides is not '': + save_guides_file = open(result_dir + '/guides.txt', 'w') + if (pam_begin): + text_guides = 'N' * pam_len + text_guides.replace('\n', '\n' + 'N' * pam_len) + else: + text_guides = text_guides.replace('\n', 'N' * pam_len + '\n') + 'N' * pam_len + save_guides_file.write(text_guides) + save_guides_file.close() + + if (int(dna) == 0 and int(rna) == 0): + search_index = False + max_bulges = rna + if (int(dna) > int(rna)): + max_bulges = dna + + if (search_index): + search = False + + if int(max_bulges) <= 2: + genome_idx = 
pam_char + '_' + '2' + '_' + genome_selected + else: + genome_idx = pam_char + '_' + '5' + '_' + genome_selected + genome_idx_ref = genome_idx.split('+')[0] + + #Create Params.txt file + with open(result_dir + '/Params.txt', 'w') as p: + p.write('Genome_selected\t' + genome_selected + '\n') + p.write('Genome_ref\t' + genome_ref + '\n') + if search_index: + p.write('Genome_idx\t' + genome_idx + '\n') + else: + p.write('Genome_idx\t' + 'None\n') + p.write('Pam\t' + pam_char + '\n') + p.write('Max_bulges\t' + str(max_bulges) + '\n') + p.write('Mismatches\t' + str(mms) + '\n') + p.write('DNA\t' + str(dna) + '\n') + p.write('RNA\t' + str(rna) + '\n') + p.write('Gecko\t' + str(gecko_comp) + '\n') + p.write('Ref_comp\t' + str(ref_comparison) + '\n') + p.close() + + #Check if input parameters (mms, bulges, pam, guides, genome) are the same as a previous search + all_result_dirs = [f for f in listdir('Results') if isdir(join('Results', f))] + all_result_dirs.remove(job_id) + #all_result_dirs.remove('test') + for check_param_dir in all_result_dirs: + if os.path.exists('Results/' + check_param_dir + '/Params.txt'): + if os.path.exists('Results/' + check_param_dir + '/log.txt'): + with open('Results/' + check_param_dir + '/log.txt') as log: + if ('Job\tDone' in log.read()): + if (filecmp.cmp('Results/' + check_param_dir + '/Params.txt', result_dir + '/Params.txt' )): + guides1 = open('Results/' + check_param_dir + '/guides.txt').read().split('\n') + guides2 = open('Results/' + job_id + '/guides.txt').read().split('\n') + if (collections.Counter(guides1) == collections.Counter(guides2)): + search = False + search_index = False + subprocess.run(['cp $PWD/Results/' + check_param_dir + '/' + check_param_dir + '* ' + result_dir + '/'], shell = True) + subprocess.run(['cp $PWD/Results/' + check_param_dir + '/*.png ' + result_dir + '/'], shell = True) + subprocess.run(['rename \'s/' + check_param_dir + '/' + job_id + '/g\' ' + result_dir + '/*'], shell = True) + break + + #Annotation 
+ if (not search and not search_index): + annotation = False + + #Generate report + if (not search and not search_index): + report = False + + annotation_filepath = [f for f in listdir('./') if isfile(join('./', f)) and f.startswith(genome_ref)] + + + subprocess.Popen(['assets/./submit_job.sh ' + 'Results/' + job_id + ' ' + 'Genomes/' + genome_selected + ' ' + 'Genomes/' + genome_ref + ' ' + 'genome_library/' + genome_idx + ( + ' ' + pam + ' ' + guides_file + ' ' + str(mms) + ' ' + str(dna) + ' ' + str(rna) + ' ' + str(search_index) + ' ' + str(search) + ' ' + str(annotation) + ( + ' ' + str(report) + ' ' + str(gecko_comp) + ' ' + str(ref_comparison) + ' ' + 'genome_library/' + genome_idx_ref + ' ' + str(send_email) + ' ' + annotation_filepath[0] + ) + )], shell = True) + return '/load','?job=' + job_id + +#When url changed, load new page +@app.callback( + [Output('page-content', 'children'), + Output('job-link', 'children')], + [Input('url', 'pathname')], + [State('url','href'), + State('url','search') + ] +) +def changePage(path, href, search): + + if path == '/load': + return load_page, 'http://127.0.0.1:8050/load' + search #NOTE change the url part when DNS are changed + if path == '/result': + return result_page, 'http://127.0.0.1:8050/load' + search + if path == '/test-page': + return test_page, 'http://127.0.0.1:8050/load' + search + return index_page, '' + +#Check end job +@app.callback( + [Output('view-results', 'style'), + Output('annotate-result-status', 'children'), + Output('search-status', 'children'), + Output('generate-report-status', 'children'), + Output('view-results','href'), + Output('no-directory-error', 'children')], + [Input('load-page-check', 'n_intervals')], + [State('url', 'search')] +) +def refreshSearch(n, dir_name): + if n is None: + raise PreventUpdate #TODO fa un controllo subito, così l'utente non deve aspettare 3 secondi per l'update + + onlydir = [f for f in listdir('Results') if isdir(join('Results', f))] + current_job_dir = 
'Results/' + dir_name.split('=')[-1] + '/' + if dir_name.split('=')[-1] in onlydir: + onlyfile = [f for f in listdir(current_job_dir) if isfile(join(current_job_dir, f))] + if 'log.txt' in onlyfile: + with open(current_job_dir + 'log.txt') as log: + all_done = 0 + annotate_res_status = html.P('To do', style = {'color':'red'}) + search_status = html.P('To do', style = {'color':'red'}) + report_status = html.P('To do', style = {'color':'red'}) + current_log = log.read() + if ('Annotation\tDone' in current_log): + annotate_res_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if ('Search-index\tDone' in current_log or 'Search\tDone' in current_log): + search_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if ('Report\tDone' in current_log): + report_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if all_done == 3: + return {'visibility':'visible'}, annotate_res_status, search_status, report_status, '/result?job=' + dir_name.split('=')[-1], '' + else: + return {'visibility':'hidden'}, annotate_res_status, search_status, report_status,'', '' + return {'visibility':'hidden'}, html.P('Not available', style = {'color':'red'}), html.P('Not available', style = {'color':'red'}), html.P('Not available', style = {'color':'red'}), '', dbc.Alert("The selected result does not exist", color = "danger") + +#Perform expensive loading of a dataframe and save result into 'global store' +#Cache are in the Cache directory +@cache.memoize() +def global_store(value): + + if value is None: + return '' + target = [f for f in listdir('Results/' + value) if isfile(join('Results/'+value, f)) and f.endswith('scores.txt') ] + if not target: + target = [f for f in listdir('Results/' + value) if isfile(join('Results/'+value, f)) and f.endswith('targets.txt') ] + + df = pd.read_csv('Results/' +value + '/' + target[0], sep = '\t') + df.rename(columns = {"#Bulge type":'BulgeType', 
'#Bulge_type':'BulgeType','Bulge Size': 'BulgeSize', 'Bulge_Size': 'BulgeSize', 'Doench 2016':'Doench2016','Doench_2016':'Doench2016'}, inplace = True) + return df + +#Callback to populate the tab, note that it's called when the result_page is loaded (dash implementation), so we do not use raise update to block this first callback +@app.callback( + [Output('signal','children'), + Output('result-table','page_current'), + Output('result-table', "sort_by"), + Output('result-table','filter_query')], + [Input('url', 'pathname')], + [State('url', 'search')] +) +def populateTable(pathname, search): + if pathname != '/result': + raise PreventUpdate + + job_id = search.split('=')[-1] + job_directory = 'Results/' + job_id + '/' + if(not isdir(job_directory)): + return 'not_exists', 0, [], '' + global_store(job_id) + return job_id, 0, [], '' + +#Send the data when next or prev button is clicked on the result table +@app.callback( + [Output('result-table', 'data'), + Output('guide-table', 'data'), + Output('mms-dropdown','options'), + Output('warning-div', 'children')], + [Input('signal', 'children'), + Input('result-table', "page_current"), + Input('result-table', "page_size"), + Input('result-table', "sort_by"), + Input('result-table', 'filter_query')] +) +def update_table(value, page_current,page_size, sort_by, filter): + #print('signal_children', value) + if value is None: + raise PreventUpdate + + if value == 'not_exists': + return [], [], [] , dbc.Alert("The selected result does not exist", color = "danger") + + filtering_expressions = filter.split(' && ') + df = global_store(value) + dff = df + for filter_part in filtering_expressions: + col_name, operator, filter_value = split_filter_part(filter_part) + + if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): + # these operators match pandas series operator method names + dff = dff.loc[getattr(dff[col_name], operator)(filter_value)] + elif operator == 'contains': + dff = dff.loc[dff[col_name].str.contains(filter_value)] 
+ elif operator == 'datestartswith': + # this is a simplification of the front-end filtering logic, + # only works with complete fields in standard format + dff = dff.loc[dff[col_name].str.startswith(filter_value)] + + if len(sort_by): + dff = dff.sort_values( + [col['column_id'] for col in sort_by], + ascending=[ + col['direction'] == 'asc' + for col in sort_by + ], + inplace=False + ) + + #Load guide table + df_guide = pd.read_csv('Results/' + value + '/guides.txt', names = ['Guides']) + + #Load mismatches + with open('Results/' + value + '/Params.txt') as p: + + mms = (next(s for s in p.read().split('\n') if 'Mismatches' in s)).split('\t')[-1] + mms = int(mms[0]) + mms_values = [{'label':i, 'value':i} for i in range(mms + 1) ] + + #Check if results are not 0 + warning_no_res = '' + if (len(dff.index) == 0 ): + warning_no_res = dbc.Alert("No results were found with the given parameters", color = "warning") + + return dff.iloc[ + page_current*page_size:(page_current+ 1)*page_size + ].to_dict('records'), df_guide.to_dict('records'), mms_values, warning_no_res + +#For filtering +def split_filter_part(filter_part): + for operator_type in operators: + for operator in operator_type: + if operator in filter_part: + name_part, value_part = filter_part.split(operator, 1) + name = name_part[name_part.find('{') + 1: name_part.rfind('}')] + + value_part = value_part.strip() + v0 = value_part[0] + if (v0 == value_part[-1] and v0 in ("'", '"', '`')): + value = value_part[1: -1].replace('\\' + v0, v0) + else: + try: + value = float(value_part) + except ValueError: + value = value_part + + # word operators need spaces after them in the filter string, + # but we don't want these later + return name, operator_type[0].strip(), value + + return [None] * 3 + + +#Read the uploaded file and converts into bit +def parse_contents(contents): + content_type, content_string = contents.split(',') + + decoded = base64.b64decode(content_string) + return decoded + +#Show images +@app.callback( 
+ [Output('barplot-img', 'src'), + Output('link-barplot', 'href'), + Output('radar-img','src'), + Output('link-radar','href')], + [Input('guide-table','selected_cells'), + Input('mms-dropdown','value')], + [State('guide-table', 'data'), + State('url','search')] +) +def showImages(sel_cel, mms, all_guides, job_id): + if sel_cel is None or mms is None: + raise PreventUpdate + job_id = job_id.split('=')[-1] + barplot_img = 'summary_histogram_' + str(mms) + 'mm.png' + try: #NOTE serve per non generare errori se il barplot non è stato fatto + barplot_src = 'data:image/png;base64,{}'.format(base64.b64encode(open('Results/' + job_id + '/' + barplot_img, 'rb').read()).decode()) + barplot_href = 'assets/Img/' + job_id + '/' + barplot_img + except: + barplot_src = '' + barplot_href = '' + guide = all_guides[int(sel_cel[0]['row'])]['Guides'] + radar_img = 'summary_single_guide_' + guide + '_' + str(mms) + 'mm.png' + try: + radar_src = 'data:image/png;base64,{}'.format(base64.b64encode(open('Results/' + job_id + '/' + radar_img, 'rb').read()).decode()) + radar_href = 'assets/Img/' + job_id + '/' + radar_img + except: + radar_src = '' + radar_href = '' + return barplot_src, barplot_href, radar_src, radar_href + + +#If the input guides are different len, select the ones with same length as the first +def selectSameLenGuides(list_guides): + selected_length = len(list_guides.split('\n')[0]) + same_len_guides_list = [] + for guide in list_guides.split('\n'): + if len(guide) == selected_length: + same_len_guides_list.append(guide) + same_len_guides = '\n'.join(same_len_guides_list).strip() + return same_len_guides + +if __name__ == '__main__': + app.run_server(debug=True) + cache.clear() #delete cache when server is closed + + #BUG quando faccio scores, se ho dei char IUPAC nei targets, nel terminale posso vedere 150% 200% etc perche' il limite massimo e' basato su wc -l dei targets, ma possono aumentare se ho molti + #Iupac + + + #TODO + # togliere caricamento del file per evitare 
confusione, \ No newline at end of file diff --git a/OldScripts/app_tabs.py b/OldScripts/app_tabs.py new file mode 100644 index 0000000..de660e6 --- /dev/null +++ b/OldScripts/app_tabs.py @@ -0,0 +1,1040 @@ +import dash +from dash.dependencies import Input, Output, State +import dash_core_components as dcc +import dash_html_components as html +import dash_daq as daq +import dash_table +from dash.exceptions import PreventUpdate +from os import listdir #for getting directories +from os.path import isfile, isdir,join #for getting directories +import subprocess +import base64 #for decoding upload content +import io #for decoding upload content +import pandas as pd #for dash table +import json #for getting and saving report images list +from os import getcwd +import time #measure time for loading df table +from flask_caching import Cache #for cache of .targets or .scores +import os + +PAGE_SIZE = 10 #number of entries in each page of the table in view report + +external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] +app = dash.Dash(__name__, external_stylesheets=external_stylesheets) +app.config['suppress_callback_exceptions'] = True #necessary if update element in a callback generated in another callback +app.css.config.serve_locally = True +app.scripts.config.serve_locally = True + +CACHE_CONFIG = { + # try 'filesystem' if you don't want to setup redis + 'CACHE_TYPE': 'filesystem', + 'CACHE_DIR': ('Cache')#os.environ.get('REDIS_URL', 'localhost:6379') +} +cache = Cache() +cache.init_app(app.server, config=CACHE_CONFIG) +app_location = os.path.dirname(os.path.abspath(__file__)) + '/' + +app.layout = html.Div([ + html.H1('CRISPRitz Web Application'), + html.Div(children=''' + CRISPRitz is a software package containing 5 different tools dedicated to perform predictive analysis and result assessement on CRISPR/Cas experiments. 
+ '''), + html.P(), + dcc.Tabs(id="main-menu", value='Main menu', children=[ + dcc.Tab(label='Add variants', value='add-variants'), + dcc.Tab(label='Index genome', value='index-genome'), + dcc.Tab(label='Search', value='search'), + dcc.Tab(label='Annotate results', value='annotate-results'), + dcc.Tab(label='Generate report', value='generate-report'), + dcc.Tab(label='View report', value='view-report') + ]), + html.Div(id='tab-content') +]) + +operators = [['ge ', '>='], + ['le ', '<='], + ['lt ', '<'], + ['gt ', '>'], + ['ne ', '!='], + ['eq ', '='], + ['contains ']] #for filtering + +@app.callback(Output('tab-content', 'children'), + [Input('main-menu', 'value')]) +def render_content(tab): + if tab == 'add-variants': + final_list = [] + + #Dropdown available genomes + onlydir = [f for f in listdir('Genomes') if isdir(join('Genomes', f))] + gen_dir = [] + for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + final_list.append(html.P(["Select an available Genome ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your directory containing the fasta file(s) into the Genomes directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = gen_dir, clearable = False, id = "available-genomes-variants"), id = 'div-available-genomes-variants')) + + #Dropdown available Variants + onlydir = [f for f in listdir('Variants') if isdir(join('Variants', f))] + gen_dir = [] + for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + final_list.append(html.P(["Select an available Variant ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your directory containing the .vcf file(s) into the Variants directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = gen_dir, clearable = False, id = "available-variants-variants"), id = 'div-available-variants-variants')) + + final_list.append(html.Br()) + #Submit job + 
final_list.append(html.Button('Submit', id='submit-variants')) + final_list.append(html.Div(id='loop_breaker_container-variants', children=[])) #Needed for 'breaking' circular dependency between button and div of spinner + final_list.append(html.Div('Adding variants', className = 'loader-name', id = 'id-loader-name-variants', style = {'visibility':'hidden'})) + final_list.append(html.Div('',className = 'loader', id='spinner-variants', style = {'visibility':'hidden'})) + final_list.append(html.Div(id = "executing-variants")) + return final_list + elif tab == 'index-genome': + final_list = [] + final_list.append(html.P('Tool to find the candidate targets in a genome starting from a PAM. The ouput is a set of files, containing all the sequences of candidate targets extracted from the genome.')) + + #Genome name + final_list.extend([html.Label('Insert the name for the output Genome Index'), dcc.Input(id = 'name-genome', placeholder='Example: hg19_ref', type='text')]) + + #Dropdown available genomes + onlydir = [f for f in listdir('Genomes') if isdir(join('Genomes', f))] + gen_dir = [] + for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + final_list.append(html.P(["Select an available Genome ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your directory containing the fasta file(s) into the Genomes directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = gen_dir, clearable = False, id = "available-genomes"), id = 'div-available-genomes')) + + #Dropdown available PAM + onlyfile = [f for f in listdir('pam') if isfile(join('pam', f))] + pam_file = [] + for pam_name in onlyfile: + pam_file.append({'label': pam_name, 'value' : pam_name}) + final_list.append(html.P(["Select an available PAM ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your PAM text file into the pam directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = 
pam_file, clearable = False, id = "available-pams"), id = 'div-available-pams')) + #TODO se lascio il rosso nel div, se metto un elemento rimane il rosso, sistemare o lasciare così + #Number of max bulges + final_list.extend([html.Label('Insert # of max allowed bulges'), dcc.Input(id = 'max-bulges', placeholder='Example: 2', type='number', min = 0)]) + final_list.append(html.P()) + + #Submit job + final_list.append(html.Button('Submit', id='submit-index-genome')) + final_list.append(html.Div(id='loop_breaker_container', children=[])) #Needed for 'breaking' cicular dependency between button and div of spinner + final_list.append(html.Div('Creating Index', className = 'loader-name', id = 'id-loader-name', style = {'visibility':'hidden'})) + final_list.append(html.Div('',className = 'loader', id='spinner', style = {'visibility':'hidden'})) + final_list.append(html.Div(id = "executing-index-genome")) + return final_list + + elif tab == 'search': + final_list = [] + final_list.append(html.P('Tool to perform off-target search on a genome (with or without variants) or genome index (with or without variants). 
The ouput is a set of files, one is a list of all targets and off-targets found, the others are profile files containing detailed information for each guide , like bp/mismatches and on/off-targets count.')) + + #Boolean switch for Index search + final_list.append(html.Div(daq.BooleanSwitch(on = False, label = "Use Index Search", labelPosition = "top", id = 'index-search'))) + + #Dropdown available genomes + onlydir = [f for f in listdir('Genomes') if isdir(join('Genomes', f))] + gen_dir = [] + for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + final_list.append(html.P(["Select an available Genome ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your directory containing the fasta file(s) into the Genomes directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = gen_dir, clearable = False, id = "available-genomes-search"), id = "div-available-genomes-search")) + + #Dropdown available PAM + onlyfile = [f for f in listdir('pam') if isfile(join('pam', f))] + pam_file = [] + for pam_name in onlyfile: + pam_file.append({'label': pam_name, 'value' : pam_name}) + final_list.append(html.P(["Select an available PAM ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your PAM text file into the pam directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = pam_file, clearable = False, id = "available-pams-search"), id = "div-available-pams-search")) + + #Dropdown available guide file + onlyfile = [f for f in listdir('guides') if isfile(join('guides', f))] + guide_file = [] + for guide_name in onlyfile: + guide_file.append({'label': guide_name, 'value' : guide_name}) + final_list.append(html.P(["Select an available Guide file ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your Guide text file into the guides directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = 
guide_file, clearable = False, id = "available-guides-search"), id = "div-available-guides-search")) + + + #Genome name + final_list.extend([html.Label('Insert the name for the output result files'), dcc.Input(id = 'name-result-file', placeholder='Example: emx1.hg19', type='text')]) + + #Number of Mimatches and DNA/RNA bulges + final_list.append( + html.Div( + [html.Div( + [html.Label('Insert # of mismatches'), + dcc.Input(id = 'max-mms', placeholder='Example: 2', type='number', min = 0)] + ), + html.Div( + [html.Label('Insert # of DNA bulges'), + dcc.Input(id = 'max-dna', placeholder='Example: 2', type='number', min = 0, disabled = True)] + ), + html.Div( + [html.Label('Insert # of RNA bulges'), + dcc.Input(id = 'max-rna', placeholder='Example: 2', type='number', min = 0, disabled = True)] + ), + html.Div( + [html.Label('Insert # of Threads'), + dcc.Input(id = 'max-thr', placeholder='Example: 2', type='number', min = 1, disabled = True)] + )], + id = "container-m-d-r", + className = "flex-container-mdr-search", + style = {'width' : '75%'} + ) + ) + + #Set Type of result + final_list.extend( + [html.Label('Select type of result:'), + dcc.Checklist( + options = [ + {'label' : 'Off-targets list', 'value': 'r'}, + {'label' : 'Profile', 'value': 'p'} + ], + value = ['r'], + id = 'result-checklist' + ) + ] + ) + + #Boolean switch for Score calculation + final_list.append( + html.Div( + [daq.BooleanSwitch(on = False, label = "Calculate Scores", labelPosition = "top", id = 'score-switch', style = {'align-items':'start'}), + html.Div([html.Label('Select Genome'), dcc.Dropdown(options = gen_dir, clearable = False, id = "scores-available-genomes-search", placeholder = 'Select the Genome')], id = "div-scores-available-genomes-search", style = {'width':'200px'}) + ], + id = 'container-scores', + className = "flex-container-score", + style = {'width' : '50%'} + ) + ) + + final_list.append(html.Br()) + + #Submit job + final_list.append(html.Button('Submit', 
id='submit-search-genome')) + final_list.append(html.Div(id='loop_breaker_container-search', children=[])) #Needed for 'breaking' cicular dependency between button and div of spinner + final_list.append(html.Div('Searching', className = 'loader-name', id = 'id-loader-name-search', style = {'visibility':'hidden'})) + final_list.append(html.Div('',className = 'loader', id='spinner-search', style = {'visibility':'hidden'})) + + final_list.append(html.Div(id = "executing-search-genome")) + + #TODO quando il search finisce, i risultati sono salvati in una cartella Results col nome in input + return final_list + elif tab == 'annotate-results': + final_list = [] + final_list.append(html.P('Tool to annotate results found during search with functional annotations (promoter, chromatin accessibility, insulator, etc). The output is a set of files, one is the list of on/off-targets with the annotation type, the others are files containing counts for each guide, the counts are the total on/off-targets found with the specific mismatch threshold and the specific annotation.')) + + #Dropdown available guide file + onlyfile = [f for f in listdir('guides') if isfile(join('guides', f))] + guide_file = [] + for guide_name in onlyfile: + guide_file.append({'label': guide_name, 'value' : guide_name}) + final_list.append(html.P(["Select an available Guide file ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your Guide text file into the guides directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = guide_file, clearable = False, id = "available-guides-annotation"), id = 'div-available-guides-annotation')) + + #Dropdown available result file + onlydir = [f for f in listdir('Results') if isdir(join('Results', f))] + result_file = [] + for result_name in onlydir: + result_file.append({'label': result_name, 'value' : result_name}) + final_list.append(html.P(["Select an available result file ", html.Sup(html.Abbr("\u003F", 
title="To add or remove elements from this list, simply move (remove) your directory containing the result file into the Results directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = result_file, clearable = False, id = "available-results-annotation"), id = 'div-available-results-annotation')) #Note that we need the .targets.txt file inside the selected directory + + #Genome name + final_list.extend([html.Label('Insert the name for the output annotated result file'), dcc.Input(id = 'name-result-file-annotated', placeholder='Example: emx1.hg19.annotated', type='text', style = {'width':'20%'})]) + + #Upload text file with path + final_list.append( + html.Div( + [html.P('Select file containing path for annotation'), + dcc.Upload(html.Button('Upload File', id = 'button-upload-path-annotation'), id = 'upload-path-annotation'), + html.P('', id = 'name-upload-file-annotation')] + ) + ) + + #Submit job + final_list.append(html.Button('Submit', id='submit-annotate-result')) + final_list.append(html.Div(id='loop_breaker_container-annotate', children=[])) #Needed for 'breaking' cicular dependency between button and div of spinner + final_list.append(html.Div('Annotating result', className = 'loader-name', id = 'id-loader-name-annotate', style = {'visibility':'hidden'})) + final_list.append(html.Div('',className = 'loader', id='spinner-annotate', style = {'visibility':'hidden'})) + final_list.append(html.Div(id = "executing-annotate-result")) + + return final_list + elif tab == 'generate-report': + final_list = [] + final_list.append(html.P('Tool to generate a graphical report with annotated and overall mismatch and bulge profile for a given guide. 
The output is a graphical representation of the input guide behaviour.')) + + #Guide Sequence + final_list.extend([html.Label('Insert the Guide sequence'), dcc.Input(id = 'guide-sequence-report', placeholder='Example: GAGTCCGAGCAGAAGAAGAANNN', type='text', size = '33')]) + + #Number of mismatches + final_list.extend([html.Label('Insert the # of mismatches'),dcc.Input(id = 'max-mms-report', placeholder='Example: 2', type='number', min = 0)]) + + #Dropdown available result file + onlydir = [f for f in listdir('Results') if isdir(join('Results', f))] + result_file = [] + for result_name in onlydir: + result_file.append({'label': result_name, 'value' : result_name}) + final_list.append(html.P(["Select an available result file ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your directory containing the result file into the Results directory"))])) + final_list.append(html.Div(dcc.Dropdown(options = result_file, clearable = False, id = "available-results-report"), id = 'div-available-results-report')) #Note that we need the .targets.txt file inside the selected directory + #From the dropdown, we could select and check if all files are available: profile, ext_profile, introns, exons etc... 
+ + #Gecko comparison + final_list.append(daq.BooleanSwitch(on = False, label = "Activate the gecko dataset comparison", labelPosition = "top", id = 'gecko-comparison-report')) #TODO sistemare posizione del bottone + + #Submit job + final_list.append(html.Button('Submit', id='submit-generate-report')) + final_list.append(html.Div(id='loop_breaker_container-generate', children=[])) #Needed for 'breaking' cicular dependency between button and div of spinner + final_list.append(html.Div(html.P('Generating result'), className = 'loader-name', id = 'id-loader-name-generate', style = {'visibility':'hidden'})) + final_list.append(html.Div('',className = 'loader', id='spinner-generate', style = {'visibility':'hidden'})) + + final_list.append(html.Div(id = "executing-generate-report")) + + return final_list + elif tab == 'view-report': + final_list = [] + + #Images from the report #TODO modify the 3 call for .savefig to also create png images in radar_chart.py and radar_chart_docker.py + onlydir = [f for f in listdir('Results') if isdir(join('Results', f))] + result_file = [] + for result_name in onlydir: + result_file.append({'label': result_name, 'value' : result_name}) + final_list.append(html.P(["Select an available result file ", html.Sup(html.Abbr("\u003F", title="To add or remove elements from this list, simply move (remove) your directory containing the result file into the Results directory"))])) + final_list.append(html.Div( + dcc.Dropdown(options = result_file, clearable = False, id = "available-results-view", style={'position':'relative', 'zIndex':'999', 'widht':'50%'}), #position and zindex is for avoid being under column fixed + id = 'div-available-results-view') + ) + final_list.append(html.Br()) + + #Table for targets and score#TODO check if user has created only targets or also scores + + col_list = ['BulgeType', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Direction', 'Mismatches', 'BulgeSize', 'CFD', 'Doench2016'] + col_type = 
['text','text','text','text','numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric'] + cols = [{"name": i, "id": i, 'type':t} for i,t in zip(col_list, col_type)] + final_list.append(dash_table.DataTable( + id='result-table', + columns=cols, + virtualization = True, + fixed_rows={ 'headers': True, 'data': 0 }, + style_cell={'width': '150px'}, + page_current=0, + page_size=PAGE_SIZE, + page_action='custom', + sort_action='custom', + sort_mode='multi', + sort_by=[], + filter_action='custom', + filter_query='' + ) + ) + + # hidden signal value + final_list.append(html.Div(id='signal', style={'display': 'none'})) + + final_list.append(html.Div([ + html.Img(id = 'selected-img', width="65%", height="65%"), + html.Div([html.Button('Next Image', id = 'next-image-button', style = {'display':'none','width': '150px'}), html.P(id = 'filename-show')], id = 'container-button-filename-show', className = 'flex-container-button-filename-show') + ], + id = 'container-img-button-show', + className = "flex-container-img-button-show" + ) + ) + #final_list.append(html.Br()) + #final_list.append(html.Button('Submit-test', id = 'next-image-button')) + + + final_list.append(html.Div(id='intermediate-value', style={'display': 'none'})) #Hidden div to save data for img show (contains list of all images available in a result directory) + return final_list + +###################################################### CALLBACKS ###################################################### + +############################# +# Callbacks for Add variant # +############################# + +#Modify spinner visibility when button is clicked +@app.callback([Output('spinner-variants', 'style'), + Output('id-loader-name-variants', 'style'), + Output('div-available-genomes-variants', 'style'), + Output('div-available-variants-variants', 'style')], + [Input('submit-variants', 'n_clicks')], + [State('available-genomes-variants', 'value'), + State('available-variants-variants', 'value')] +) +def 
modifyVisibilitySpinVariants(n_clicks, genome, variant): + if n_clicks is None: + raise PreventUpdate + + drop_red = {'border': '3px solid red'} + genome_update = None + variant_update = None + drop_update = False + if genome is None or genome is '': + genome_update = drop_red + drop_update = True + if variant is None or variant is '': + variant_update = drop_red + drop_update = True + if drop_update: + return {'visibility':'hidden'}, {'visibility':'hidden'}, genome_update, variant_update + + if n_clicks > 0: + return {'visibility':'visible'}, {'visibility':'visible'}, None, None + return {'visibility':'hidden'}, {'visibility':'hidden'}, None, None + +#Execute Add Variants to a genome (when spinner is set Visible, otherwise just signal the nex Div to set n_clicks to None) +@app.callback(Output('loop_breaker_container-variants', 'children'), + [Input('spinner-variants', 'style')], + [State('submit-variants', 'n_clicks'), + State('available-genomes-variants', 'value'), + State('available-variants-variants', 'value')] +) +def executeVariants(style, n_clicks, genome, variant): + if n_clicks is None: + raise PreventUpdate + + if style['visibility'] == 'hidden': + raise PreventUpdate + if n_clicks > 0: + command_string = 'add-variants' + ' ' + app_location + 'Variants/' + variant + ' ' + app_location + 'Genomes/' + genome + subprocess.call(['crispritz.py ' + command_string], shell = True) + #TODO docker call + return [html.Div(id='loop_breaker', children=True)] + return [html.Div(id='loop_breaker', children=False)] + +#Loop breaker, if this callback was activated after doing a long process, signal the button callback to hide the spinner +#If was activated in another way, simply signal the button to raise a prevent update +@app.callback( + Output('submit-variants', 'n_clicks'), + [Input ('loop_breaker', 'children')] +) +def resetButtonVariants(val): + ''' + The function resets the n_clicks value of the Submit button. 
If the value returned from the executeIndex function is True, it means that the crispritz call was executed and finished, + meaning that we need to hide the loading spinner. The n_clicks is put to 0, so that it triggers the modifyVisibilitySpin() function in order to hide the spinner + ''' + if val is None or val is '': + raise PreventUpdate + + if val: + return 0 + return None + + +########################## +# Callbacks for Indexing # +########################## + +#Modify spinner visibility when button is clicked +@app.callback( + [Output('spinner', 'style'), + Output('id-loader-name', 'style'), + Output("executing-index-genome", "children"), + Output('name-genome', 'required'), + Output('max-bulges', 'required'), + Output('div-available-genomes', 'style'), + Output('div-available-pams', 'style')], + [Input('submit-index-genome', 'n_clicks')], + [State('max-bulges', 'value'), + State('name-genome', 'value'), + State('available-genomes', 'value'), + State('available-pams', 'value')] +) +def modifyVisibilitySpin(n_clicks, max_bulges, name, gen, pam): + ''' + The function is called when the button is pressed (n_clicks from None to 1..2..3) or when the n_clicks value is modified in another callback (resetButton() function, n_clicks goes from + it's value >0 to 0). It checks the validity of all input elements and if some input is missing, returns 'required = True' to the appropriate element and changes the style of the loading + spinner to remain hidden. + + If all elements are correctly given in input, and the button is effectively pressed (NOT modified in another callback), it modifies the style of the loading spinner to be visible, and triggers + the executeIndex() function. + + If the n_clicks value is modified in another callback (NOTE: set to 0), it instead modifies the loading spinner style to be hidden, again triggering th executeIndex() function. 
+ ''' + if n_clicks is None: + raise PreventUpdate + drop_red = {'border': '3px solid red'} + #Check if all elements are valid for input + gen_update = None + pam_update = None + drop_update = False #Create red border only if one of the drop is None or '' + if gen is None or gen is '': + gen_update = drop_red + drop_update = True + if pam is None or pam is '': + pam_update = drop_red + drop_update = True + if name is None or name is '' or max_bulges is None or drop_update: #max bulges is none when not set or is not a number + return {'visibility':'hidden'}, {'visibility':'hidden'}, '', True, True, gen_update, pam_update + + if n_clicks > 0: + return {'visibility':'visible'}, {'visibility':'visible'}, '', False, False, None, None + else: + return {'visibility':'hidden'}, {'visibility':'hidden'}, '', False, False, None, None + +#Execute Indexing of a genome (when spinner is set Visible, otherwise just signal the nex Div to set n_clicks to None) +@app.callback(Output('loop_breaker_container', 'children'), + [Input('spinner', 'style')], + [State('submit-index-genome', 'n_clicks'), + State('max-bulges', 'value'), + State('name-genome', 'value'), + State('available-genomes', 'value'), + State('available-pams', 'value')]) +def executeIndex(style, n_clicks, max_bulges, name, gen, pam): + ''' + The function execute the bash call crispritz.py generate-index. It's triggered when the loading spinner style is modified (hidden to visible or visible to hidden). 
+ + If the current style is 'hidden', it raises a PreventUpdate, because it means that the style was modified from the button and it doesn't need to execute the bash function since + it's hidden (E.g.: when an input is missing and the button clicked, executeIndex() is still triggered but remains hidden and the crispritz function doesn't need to be executed) + ''' + if n_clicks is None: + raise PreventUpdate + if style['visibility'] == 'hidden': + raise PreventUpdate + if n_clicks > 0: + command_string = 'index-genome ' + name + ' ' + app_location + 'Genomes/' + gen + ' ' + app_location + 'pam/' + pam + ' ' + '-bMax ' + str(max_bulges) + + subprocess.call(['crispritz.py '+ command_string], shell= True) #TODO find better positioning of Loading and loading gif + + #TODO differentiate between Conda and Docker + #TODO with docker the created folders are owned by root, + #subprocess.call(['docker run -v ${PWD}:/DATA -w /DATA -i pinellolab/crispritz crispritz.py index-genome hg19_ref Genomes/hg19_ref/ pam/pamNGG.txt -bMax 2'], shell=True) + return [html.Div(id='loop_breaker', children=True)] + return [html.Div(id='loop_breaker', children=False)] + +#Loop breaker, if this callback was activated after doing a long process, signal the button callback to hide the spinner +#If was activated in another way, simply signal the button to raise a prevent update +@app.callback( + Output('submit-index-genome', 'n_clicks'), + [Input ('loop_breaker', 'children')] +) +def resetButton(val): + ''' + The function resets the n_clicks value of the Submit button. If the value returned from the executeIndex function is True, it means that the crispritz call was executed and finished, + meaning that we need to hide the loading spinner. 
The n_clicks is put to 0, so that it triggers the modifyVisibilitySpin() function in order to hide the spinner + ''' + if val is None or val is '': + raise PreventUpdate + + if val: + return 0 + return None + +######################## +# Callbacks for Search # +######################## + +#Switch available fields from nonIndex to Index search +@app.callback([Output('max-dna', 'disabled'), + Output('max-rna', 'disabled'), + Output('max-thr', 'disabled'), + Output('available-genomes-search', 'options')], + [Input('index-search', 'on')] + ) +def switchSearch(on): + if on: + onlydir = [f for f in listdir('genome_library') if isdir(join('genome_library', f))] #select Indexed genomes + gen_dir = [] + for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + + return False, False, False, gen_dir + + onlydir = [f for f in listdir('Genomes') if isdir(join('Genomes', f))] + gen_dir = [] + for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + return True, True, True, gen_dir + +#Switch directory availability for Score +@app.callback(Output('scores-available-genomes-search', 'disabled'), + [Input('score-switch', 'on')] + ) +def switchScore(on): + if on: + return False + return True + +#Modify spinner visibility when button is clicked +@app.callback([ + Output('spinner-search', 'style'), + Output('id-loader-name-search', 'style'), + Output('executing-search-genome', 'children'), + Output('name-result-file', 'required'), + Output('max-mms', 'required'), + Output('max-dna', 'required'), + Output('max-rna', 'required'), + Output('max-thr', 'required'), + Output('div-available-genomes-search', 'style'), + Output('div-available-pams-search', 'style'), + Output('div-available-guides-search', 'style'), + Output('result-checklist', 'style'), + Output('div-scores-available-genomes-search', 'style')], + [Input('submit-search-genome', 'n_clicks')], + [State('index-search', 'on'), + State('available-genomes-search', 'value'), + State('available-pams-search', 
'value'), + State('available-guides-search', 'value'), + State('name-result-file', 'value'), + State('max-mms', 'value'), + State('max-dna', 'value'), + State('max-rna', 'value'), + State('max-thr', 'value'), + State('result-checklist', 'value'), + State('score-switch', 'on'), + State('scores-available-genomes-search', 'value')] + ) +def visibilitySpinSearch(n_clicks, index, genome, pam, guide, res_name, mm, dna, rna, thr, res_type, score, genome_score): + if n_clicks is None: + raise PreventUpdate + + drop_red = {'border': '3px solid red'} + drop_red_checklist = {'width':'12%','border':'3px solid red'} + gen_update = None + pam_update = None + guide_update = None + checklist_result_type = None + score_update = None + drop_update = False + + #Check if elements are valid + if genome is None or genome is '': + gen_update = drop_red + drop_update = True + if pam is None or pam is '': + pam_update = drop_red + drop_update = True + if guide is None or guide is '': + guide_update = drop_red + drop_update = True + if not res_type: + checklist_result_type = drop_red_checklist + drop_update = True + if score and (genome_score is None or genome_score is ''): + score_update = drop_red + drop_update = True + + if res_name is None or res_name is '' or mm is None or (index and (dna is None or rna is None or thr is None)) or drop_update: + return {'visibility':'hidden'}, {'visibility':'hidden'},'', True, True, True, True, True, gen_update, pam_update, guide_update, checklist_result_type, score_update + + if n_clicks > 0: + + return {'visibility':'visible'}, {'visibility':'visible'},'', False, False, False, False, False, None, None, None, None, None + + return {'visibility':'hidden'}, {'visibility':'hidden'},'', False, False, False, False, False, None, None, None, None, None + +#Execute Search (when spinnder is visible, otherwise just signal the nex Div to set n_clicks to None ) +@app.callback(Output('loop_breaker_container-search', 'children'), + [Input('spinner-search', 
'style')], + [State('submit-search-genome', 'n_clicks'), + State('index-search', 'on'), + State('available-genomes-search', 'value'), + State('available-pams-search', 'value'), + State('available-guides-search', 'value'), + State('name-result-file', 'value'), + State('max-mms', 'value'), + State('max-dna', 'value'), + State('max-rna', 'value'), + State('max-thr', 'value'), + State('result-checklist', 'value'), + State('score-switch', 'on'), + State('scores-available-genomes-search', 'value')] +) +def executeSearch(style, n_clicks, index, genome, pam, guide, result, mms, dna, rna, thr, result_type, scores, genome_scores): + if n_clicks is None: + raise PreventUpdate + + if style['visibility'] == 'hidden': + raise PreventUpdate + + if n_clicks > 0: + if index: + command_string = 'search ' + app_location + 'genome_library/' + genome + ' ' + app_location + 'pam/' + pam + ' ' + app_location + 'guides/' + guide + ' ' + result + ' ' + '-index' + ' ' + '-mm' + ' ' + str(mms) + ' ' + if dna > 0: + command_string = command_string + '-bDNA' + ' ' + str(dna) + ' ' + if rna > 0: + command_string = command_string + '-bRNA' + ' ' + str(rna) + ' ' + if 'r' in result_type and 'p' in result_type: + command_string = command_string + '-t' + ' ' + else: + command_string = command_string + '-' + result_type[0] + ' ' + command_string = command_string + '-th' + ' ' + str(thr) + ' ' + if scores: + command_string = command_string + '-scores' + ' ' + app_location + 'Genomes/' + genome_scores + + subprocess.call(['crispritz.py '+ command_string], shell= True) #TODO docker call + + #Move results into a directory in the Results directory + if not isdir(app_location + 'Results/' + result): + subprocess.call(['mkdir', app_location + 'Results/' + result ]) + subprocess.call(['mv -f' + ' ' + app_location + result + '.*.txt' + ' ' + app_location + 'Results/' + result], shell=True) + subprocess.call(['mv -f' + ' ' + app_location + result + '.*.xls' + ' ' + app_location + 'Results/' + result], 
shell=True) + return [html.Div(id='loop_breaker', children=True)] + + command_string = 'search ' + app_location + 'Genomes/' + genome + ' ' + app_location + 'pam/' + pam + ' ' + app_location + 'guides/' + guide + ' ' + result + ' ' + '-mm' + ' ' + str(mms) + ' ' + if 'r' in result_type and 'p' in result_type: + command_string = command_string + '-t' + ' ' + else: + command_string = command_string + '-' + result_type[0] + ' ' + if scores: + command_string = command_string + '-scores' + ' ' + app_location + 'Genomes/' + genome_scores + subprocess.call(['crispritz.py ' + command_string], shell= True) #TODO Docker call + + #Move results into a directory in the Results directory + if not isdir(app_location + 'Results/' + result): + subprocess.call(['mkdir', app_location + 'Results/' + result ]) + subprocess.call(['mv -f' + ' ' + app_location + result + '.*.txt' + ' ' + app_location + 'Results/' + result], shell=True) + subprocess.call(['mv -f' + ' ' + app_location + result + '.*.xls' + ' ' + app_location + 'Results/' + result], shell=True) + return [html.Div(id='loop_breaker', children=True)] + return [html.Div(id='loop_breaker', children=False)] + +#Loop breaker, if this callback was activated after doing a long process, signal the button callback to hide the spinner +#If was activated in another way, simply signal the button to raise a prevent update +@app.callback( + Output('submit-search-genome', 'n_clicks'), + [Input ('loop_breaker', 'children')] +) +def resetButtonSearch(val): + ''' + The function resets the n_clicks value of the Submit button. If the value returned from the executeIndex function is True, it means that the crispritz call was executed and finished, + meaning that we need to hide the loading spinner. 
The n_clicks is put to 0, so that it triggers the modifyVisibilitySpin() function in order to hide the spinner + ''' + if val is None or val is '': + raise PreventUpdate + + if val: + return 0 + return None + +########################## +# Callbacks for Annotate # +########################## + +#Show the uploaded filename +@app.callback(Output('name-upload-file-annotation', 'children'), + [Input('upload-path-annotation', 'filename')] + ) +def showFileName(name): + if name is None: + return '' + return 'Uploaded ' + name + +#Read the uploaded file and converts into bit +def parse_contents(contents): + content_type, content_string = contents.split(',') + + decoded = base64.b64decode(content_string) + return decoded + +#Modify spinner visibility when button is clicked +@app.callback([Output('spinner-annotate', 'style'), + Output('id-loader-name-annotate', 'style'), + Output('executing-annotate-result', 'children'), + Output('div-available-guides-annotation', 'style'), + Output('div-available-results-annotation', 'style'), + Output('button-upload-path-annotation', 'style'), + Output('name-result-file-annotated', 'required')], + [Input('submit-annotate-result', 'n_clicks')], + [State('available-guides-annotation', 'value'), + State('available-results-annotation', 'value'), + State('name-result-file-annotated', 'value'), + State('upload-path-annotation', 'contents')] + ) +def modifyVisibilitySpinAnnotate(n_clicks, guide, result_target, result_name, file_content): + if n_clicks is None: + raise PreventUpdate + + #Check if elements are valid + drop_red = {'border': '3px solid red'} + guide_update = None + res_update = None + upload_btn_update = None + drop_update = False + if guide is None or guide is '': + guide_update = drop_red + drop_update = True + if result_target is None or result_target is '': + res_update = drop_red + drop_update = True + if file_content is None: + upload_btn_update = drop_red + drop_update = True + + if result_name is None or result_name is '' or 
(drop_update): + return {'visibility':'hidden'}, {'visibility':'hidden'}, '', guide_update, res_update, upload_btn_update, True + + if n_clicks > 0: + return {'visibility':'visible'}, {'visibility':'visible'}, '', guide_update, res_update, upload_btn_update, False + return {'visibility':'hidden'}, {'visibility':'hidden'}, '', None, None, None, False + +#Execute Annotation of a result (when spinner is set Visible, otherwise just signal the nex Div to set n_clicks to None) +@app.callback(Output('loop_breaker_container-annotate', 'children'), + [Input('spinner-annotate', 'style')], + [State('submit-annotate-result', 'n_clicks'), + State('available-guides-annotation', 'value'), + State('available-results-annotation', 'value'), + State('name-result-file-annotated', 'value'), + State('upload-path-annotation', 'contents')] +) +def executeAnnotation(style, n_clicks, guide, result_target, result_name, file_content): + if n_clicks is None: + raise PreventUpdate + if style['visibility'] == 'hidden': + raise PreventUpdate + + if n_clicks > 0: + paths = parse_contents(file_content).decode('utf-8') + file_tmp_location = app_location + 'Results/'+ result_target + '/annotation_path.txt' + file_tmp = open(file_tmp_location, 'w') #TODO choose better name for when multiple users + file_tmp.write(paths) #use file_tmp as input for the bash call + file_tmp.close() + if not isfile(app_location + 'Results/' + result_target + '/' + result_target + '.targets.txt'): + return [html.Div(id='loop_breaker', children=False)] #TODO segnalare che manca il file.targets? 
+ + #Note that we need the targets.txt file inside the result_target directory + command_string = 'annotate-results' + ' ' + app_location + 'guides/' + guide + ' ' + app_location + 'Results/' + result_target + '/' + result_target + '.targets.txt' + ' ' + file_tmp_location + ' ' + result_name + subprocess.call(['crispritz.py ' + command_string], shell = True) + + subprocess.call(['mv -f' + ' ' + app_location + result_name + '.*.txt' + ' ' + app_location + 'Results/' + result_target ], shell=True) + return [html.Div(id='loop_breaker', children=True)] + return [html.Div(id='loop_breaker', children=False)] + +#Loop breaker, if this callback was activated after doing a long process, signal the button callback to hide the spinner +#If was activated in another way, simply signal the button to raise a prevent update +@app.callback( + Output('submit-annotate-result', 'n_clicks'), + [Input ('loop_breaker', 'children')] +) +def resetButtonAnnotate(val): + ''' + The function resets the n_clicks value of the Submit button. If the value returned from the executeIndex function is True, it means that the crispritz call was executed and finished, + meaning that we need to hide the loading spinner. The n_clicks is put to 0, so that it triggers the modifyVisibilitySpin() function in order to hide the spinner + ''' + if val is None or val is '': + raise PreventUpdate + + if val: + return 0 + return None +################################# +# Callbacks for Generate Report # +################################# + +#TODO creare funzione che quando scelgo il Result controllo subito che cisiano tutti i file e nel caso li faccio vedere? 
# Modify spinner visibility when the Generate Report button is pressed
@app.callback([Output('spinner-generate', 'style'),
               Output('id-loader-name-generate', 'style'),
               Output('executing-generate-report', 'value'),
               Output('guide-sequence-report', 'required'),
               Output('max-mms-report', 'required'),
               Output('div-available-results-report', 'style')],
              [Input('submit-generate-report', 'n_clicks')],
              [State('guide-sequence-report', 'value'),
               State('max-mms-report', 'value'),
               State('available-results-report', 'value')]
              )
def visibilitySpinGenerate(n_clicks, sequence, mms, result_file):
    '''Validate the Generate Report form and toggle the loading spinner.'''
    if n_clicks is None:
        raise PreventUpdate
    # Check if elements are valid; mark the missing dropdown with a red border.
    drop_red = {'border': '3px solid red'}
    result_update = None
    drop_update = False
    if result_file is None:
        result_update = drop_red
        drop_update = True
    # BUGFIX: `sequence is ''` -> `sequence == ''` (identity vs equality).
    if sequence is None or sequence == '' or mms is None or drop_update:
        return {'visibility': 'hidden'}, {'visibility': 'hidden'}, '', True, True, result_update

    if n_clicks > 0:
        return {'visibility': 'visible'}, {'visibility': 'visible'}, '', False, False, None
    return {'visibility': 'hidden'}, {'visibility': 'hidden'}, '', False, False, None

# Execute generate (when spinner is visible, otherwise just signal the next Div to set n_clicks to None)
@app.callback(Output('loop_breaker_container-generate', 'children'),
              [Input('spinner-generate', 'style')],
              [State('submit-generate-report', 'n_clicks'),
               State('guide-sequence-report', 'value'),
               State('max-mms-report', 'value'),
               State('available-results-report', 'value'),
               State('gecko-comparison-report', 'on')]
              )
def executeGenerate(style, n_clicks, guide, mms, result_file, gecko):
    '''
    Build the `crispritz.py generate-report` command for the selected result.
    NOTE(review): the command is built but never executed -- the original call
    is still marked TODO and is intentionally left unfinished here.
    '''
    if n_clicks is None:
        raise PreventUpdate

    if style['visibility'] == 'hidden':
        raise PreventUpdate

    if n_clicks > 0:
        # TODO Check if all files are available.
        # Example: crispritz.py generate-report GAGTCCGAGCAGAAGAAGAANNN -mm 4
        #   -profile emx1.hg19.profile.xls -extprofile emx1.hg19.extended_profile.xls
        #   -exons emx1.hg19.annotated.ExonsCount.txt -introns ... -gecko
        command_string = ('generate-report' + ' ' + guide + ' ' + '-mm' + ' ' + str(mms)
                          + ' ' + '-profile' + ' ' + result_file + '.profile.xls'
                          + ' ' + '-extprofile' + ' ' + result_file + '.extended_profile.xls'
                          + ' ' + '-exons' + ' ' + result_file + 'annotated.ExonsCount.txt')  # ...TODO finish call
        if gecko:
            # BUGFIX: a separating space was missing before '-gecko'.
            command_string = command_string + ' -gecko'

        return [html.Div(id='loop_breaker', children=True)]
    return [html.Div(id='loop_breaker', children=False)]

# Loop breaker: if this callback was activated after doing a long process, signal the button callback to hide the spinner.
# If it was activated in another way, simply signal the button to raise a PreventUpdate.
@app.callback(
    Output('submit-generate-report', 'n_clicks'),
    [Input('loop_breaker', 'children')]
)
def resetButtonGenerate(val):
    '''
    Reset the n_clicks value of the Submit button. If the value returned from
    executeGenerate is True, the crispritz call finished, so n_clicks is put to
    0 to trigger visibilitySpinGenerate() and hide the spinner.
    '''
    if val is None or val == '':
        raise PreventUpdate

    if val:
        return 0
    return None

#############################
# Callbacks for Show Report #
#############################

# Perform the expensive loading of a dataframe and save the result into the
# 'global store'. Cached files live in the Cache directory.
@cache.memoize()
def global_store(value):
    '''
    Load the *.scores.txt (preferred) or *.targets.txt table of the given result
    directory into a DataFrame, normalising the legacy column names.
    '''
    target = [f for f in listdir('Results/' + value) if isfile(join('Results/' + value, f)) and f.endswith('scores.txt')]
    if not target:
        target = [f for f in listdir('Results/' + value) if isfile(join('Results/' + value, f)) and f.endswith('targets.txt')]

    df = pd.read_csv('Results/' + value + '/' + target[0], sep='\t')
    df.rename(columns={"#Bulge type": 'BulgeType', '#Bulge_type': 'BulgeType',
                       'Bulge Size': 'BulgeSize', 'Bulge_Size': 'BulgeSize',
                       'Doench 2016': 'Doench2016', 'Doench_2016': 'Doench2016'}, inplace=True)
    return df

# Signal that loading is done and reset page_current and the sorting/filter state
@app.callback(
    [Output('signal', 'children'),
     Output('result-table', 'page_current'),
     Output('result-table', "sort_by"),
     Output('result-table', 'filter_query')],
    [Input('available-results-view', 'value')]
)
def compute_value(value):
    '''Warm the cache for the selected result and reset the table state.'''
    if value is None:
        raise PreventUpdate
    global_store(value)
    return value, 0, [], ''

# Send the data when next or prev button is clicked on the result table
@app.callback(
    Output('result-table', 'data'),
    [Input('signal', 'children'),
     Input('result-table', "page_current"),
     Input('result-table', "page_size"),
     Input('result-table', "sort_by"),
     Input('result-table', 'filter_query')]
)
def update_table(value, page_current, page_size, sort_by, filter_query):
    '''
    Serve one page of the cached result table, applying the dash_table filter
    query and sort order. (Parameter renamed from `filter`, which shadowed the
    builtin; Dash passes callback arguments positionally, so this is safe.)
    '''
    if value is None:
        raise PreventUpdate

    filtering_expressions = filter_query.split(' && ')
    df = global_store(value)
    dff = df
    for filter_part in filtering_expressions:
        col_name, operator, filter_value = split_filter_part(filter_part)

        if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
            # these operators match pandas Series method names
            dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
        elif operator == 'contains':
            dff = dff.loc[dff[col_name].str.contains(filter_value)]
        elif operator == 'datestartswith':
            # this is a simplification of the front-end filtering logic,
            # only works with complete fields in standard format
            dff = dff.loc[dff[col_name].str.startswith(filter_value)]

    if sort_by:
        dff = dff.sort_values(
            [col['column_id'] for col in sort_by],
            ascending=[col['direction'] == 'asc' for col in sort_by]
        )

    return dff.iloc[
        page_current * page_size:(page_current + 1) * page_size
    ].to_dict('records')

# For filtering
def split_filter_part(filter_part):
    '''
    Parse one dash_table filter expression into (column_name, operator, value).
    Returns (None, None, None) when no known operator is present.
    '''
    for operator_type in operators:
        for operator in operator_type:
            if operator in filter_part:
                name_part, value_part = filter_part.split(operator, 1)
                name = name_part[name_part.find('{') + 1: name_part.rfind('}')]

                value_part = value_part.strip()
                v0 = value_part[0]
                if v0 == value_part[-1] and v0 in ("'", '"', '`'):
                    value = value_part[1:-1].replace('\\' + v0, v0)
                else:
                    try:
                        value = float(value_part)
                    except ValueError:
                        value = value_part

                # word operators need spaces after them in the filter string,
                # but we don't want these later
                return name, operator_type[0].strip(), value

    return [None] * 3

# Given the selected result, save the list of available images in that directory
@app.callback(
    [Output('intermediate-value', 'children'),
     Output('next-image-button', 'n_clicks')],
    [Input('available-results-view', 'value')]
)
def loadImgList(value):
    '''Collect the .png files of the selected result directory as a JSON list.'''
    # BUGFIX: `value is ''` -> `value == ''` (identity vs equality).
    if value is None or value == '':
        raise PreventUpdate

    onlyimg = [f for f in listdir('Results/' + value) if isfile(join('Results/' + value, f)) and f.endswith('.png')]
    json_str = json.dumps(onlyimg)
    # Returning n_clicks = 0 'tricks' the system into pressing Next, thus loading an image
    return json_str, 0
json.dumps(onlyimg) + return json_str, 0 #Returning the n_clicks is needed to 'trick' the system to press the Next button, thus loading an image + +#When result is chosen, or when Next button is pressed, load the img list and go to next image +@app.callback( + [Output('selected-img','src'), + Output('filename-show', 'children'), + Output('next-image-button', 'style')], + [Input('intermediate-value', 'children'), + Input('next-image-button', 'n_clicks')], + [State('available-results-view', 'value')] +) +def showImg(json_data, n_clicks, value): + if json_data is None or json_data is '' or n_clicks is None: + raise PreventUpdate + + img_list = json.loads(json_data) + + img_pos = 0 + if n_clicks > 0 : + img_pos = n_clicks%len(img_list) + image_filename = 'Results/' + value + '/' + img_list[img_pos] + encoded_image = base64.b64encode(open(image_filename, 'rb').read()) + return 'data:image/png;base64,{}'.format(encoded_image.decode()), 'Selected image: ' + image_filename.split('/')[-1] , {'width':'150px'} + + +if __name__ == '__main__': + app.run_server(debug=True) + cache.clear() #delete cache when server is closed \ No newline at end of file diff --git a/OldScripts/app_v4.py b/OldScripts/app_v4.py new file mode 100644 index 0000000..faa3d62 --- /dev/null +++ b/OldScripts/app_v4.py @@ -0,0 +1,1430 @@ +#NEW: +#-sequence input +#-extract guides from sequence +#-General guide result table +#-Spcific table with ordered offtargets for each guide +#-Download results + +import dash +from dash.dependencies import Input, Output, State +import dash_core_components as dcc +import dash_html_components as html +import dash_daq as daq +import dash_table +from dash.exceptions import PreventUpdate +from os import listdir #for getting directories +from os.path import isfile, isdir,join #for getting directories +import subprocess +import base64 #for decoding upload content +import io #for decoding upload content +import pandas as pd #for dash table +import json #for getting and saving 
report images list +from os import getcwd +import time #measure time for loading df table +from flask_caching import Cache #for cache of .targets or .scores +import os +import string #for job id +import random #for job id +import sys #for sys.exit() +import filecmp #check if Params files are equals +import dash_bootstrap_components as dbc +import collections #For check if guides are the same in two results +from datetime import datetime #For time when job submitted +from seq_script import extract_seq, convert_pam + +PAGE_SIZE = 10 #number of entries in each page of the table in view report + +external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css', dbc.themes.BOOTSTRAP] +app = dash.Dash(__name__, external_stylesheets=external_stylesheets) + +app.title = 'CRISPRitz' +app.config['suppress_callback_exceptions'] = True #necessary if update element in a callback generated in another callback +app.css.config.serve_locally = True +app.scripts.config.serve_locally = True + +CACHE_CONFIG = { + # try 'filesystem' if you don't want to setup redis + 'CACHE_TYPE': 'filesystem', + 'CACHE_DIR': ('Cache')#os.environ.get('REDIS_URL', 'localhost:6379') +} +cache = Cache() +cache.init_app(app.server, config=CACHE_CONFIG) +app_location = os.path.dirname(os.path.abspath(__file__)) + '/' +operators = [['ge ', '>='], + ['le ', '<='], + ['lt ', '<'], + ['gt ', '>'], + ['ne ', '!='], + ['eq ', '='], + ['contains ']] #for filtering + +#Dropdown available genomes +onlydir = [f for f in listdir('Genomes') if isdir(join('Genomes', f))] +onlydir = [x.replace('_', ' ') for x in onlydir] +gen_dir = [] +for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + +#Dropdown available PAM +onlyfile = [f for f in listdir('pam') if isfile(join('pam', f))] +onlyfile = [x.replace('.txt', '') for x in onlyfile] #removed .txt for better visualization +pam_file = [] +for pam_name in onlyfile: + if 'NGG' in pam_name: + pam_file.append({'label':pam_name, 'value':pam_name}) + else: + 
pam_file.append({'label': pam_name, 'value' : pam_name, 'disabled':True}) + +#Dropdown available Variants +onlydir = [f for f in listdir('Variants') if isdir(join('Variants', f))] +var_dir = [] +for dir in onlydir: + var_dir.append({'label': dir, 'value' : dir}) + +#Available mismatches and bulges +av_mismatches = [{'label': i, 'value': i} for i in range(0, 8)] +av_bulges = [{'label': i, 'value': i} for i in range(0, 6)] +av_guide_sequence = [{'label': i, 'value': i} for i in range(15, 26)] +search_bar = dbc.Row( + [ + #dbc.Col(dbc.Input(type="search", placeholder="Search")), + dbc.Col(dbc.NavLink('HOME', active = True, href = 'http://127.0.0.1:8050', className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})), + dbc.Col(dbc.NavLink('ABOUT', active = True, href = 'http://127.0.0.1:8050', className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})), + dbc.Col( + dbc.DropdownMenu( + children=[ + dbc.DropdownMenuItem("Github", header=True), + dbc.DropdownMenuItem("InfOmics/CRISPRitz", href='https://github.com/InfOmics/CRISPRitz'), + dbc.DropdownMenuItem("Pinellolab/CRISPRitz", href='https://github.com/pinellolab/CRISPRitz'), + ], + #nav=True, + in_navbar=True, + label="Downloads", + style = {'width': '300px !important' } #'height': '400px !important' + ), + ), + dbc.Col(dbc.NavLink('CONTACTS', active = True, href = 'http://127.0.0.1:8050', className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})) + ], + no_gutters=True, + className="ml-auto flex-nowrap mt-3 mt-md-0", + align="center", +) +PLOTLY_LOGO = "https://images.plot.ly/logo/new-branding/plotly-logomark.png" + + +navbar = dbc.Navbar( + [ + html.A( + # Use row and col to control vertical alignment of logo / brand + dbc.Row( + [ + dbc.Col(html.Img(src=PLOTLY_LOGO, height="30px")), + dbc.Col(dbc.NavbarBrand("CRISPRitz Web App", className="ml-2", style = {'font-size': '30px'})) + ], + 
align="center", + no_gutters=True, + ), + href='http://127.0.0.1:8050', + ), + dbc.NavbarToggler(id="navbar-toggler"), + dbc.Collapse(search_bar, id="navbar-collapse", navbar=True), + ], + color="dark", + dark=True, +) + +#For multipage +app.layout = html.Div([ + navbar, + dcc.Location(id='url', refresh=False), + html.Div(id='page-content'), + html.P(id = 'signal', style = {'visibility':'hidden'}) +]) + + + +#new final_list +final_list = [] +final_list.extend([#html.H1('CRISPRitz Web Application'), + html.Div(children=''' + CRISPRitz is a software package containing 5 different tools dedicated to perform predictive analysis and result assessement on CRISPR/Cas experiments. + '''), + html.P()]) + +final_list.append( + html.Div( + [ + html.P(['Download the offline version here: ', html.A('InfOmics/CRISPRitz', href = 'https://github.com/InfOmics/CRISPRitz', target="_blank"), ' or ', html.A('Pinellolab/CRISPRitz', href = 'https://github.com/pinellolab/CRISPRitz', target="_blank") ]) + ] + ) +) +checklist_div = html.Div( + [ + dbc.FormGroup( + [ + dbc.Checkbox( + id="checkbox-gecko", className="form-check-input" + ), + dbc.Label( + #html.P(['Activate Gecko ', html.Abbr('comparison', title ='The results of your test guides will be compared with results obtained from a previous computed analysis on gecko library')]) , + html.P('Compare your results with the Gecko library'), + html_for="checkbox-gecko", + className="form-check-label", + ), + dbc.Checkbox( + id="checkbox-ref-comp", className="form-check-input" + ), + dbc.Label( + #html.P(['Activate Reference genome ', html.Abbr('comparison', title ='The results of your test guides will be compared with the results obtained from a computed analysis on the corresponding reference genome. 
Note: this may increase computational time')]) , + html.P('Compare your results with the corresponding reference genome'), + html_for="checkbox-ref-comp", + className="form-check-label", + ), + dbc.Checkbox( + id="checkbox-example-input", className="form-check-input" + ), + dbc.Label( + #html.P(['Activate Reference genome ', html.Abbr('comparison', title ='The results of your test guides will be compared with the results obtained from a computed analysis on the corresponding reference genome. Note: this may increase computational time')]) , + html.P('Insert example parameters'), + html_for="checkbox-example-input", + className="form-check-label", + ), + # dbc.Checkbox( + # id="checkbox-email", className="form-check-input" + # ), + # dbc.Label( + # 'Notify me by email', + # html_for="checkbox-email", + # className="form-check-label", + # ) + ], + check = True + ) + ], + id = 'checklist-test-div' +) + +modal = html.Div( + [ + dbc.Modal( + [ + dbc.ModalHeader("WARNING! Missing inputs"), + dbc.ModalBody('The following inputs are missing, please select values before submitting the job', id = 'warning-list'), + dbc.ModalFooter( + dbc.Button("Close", id="close" , className="modal-button") + ), + ], + id="modal", + centered=True + ), + ] +) + +tab_guides_content = html.Div( + [ + html.P([ + 'Insert crRNA sequence(s), one per line.', + html.P('Sequences must have the same length and be provided without the PAM sequence', id = 'testP') , + ], + style = {'word-wrap': 'break-word'}), + + dcc.Textarea(id = 'text-guides', placeholder = 'GAGTCCGAGCAGAAGAAGAA\nCCATCGGTGGCCGTTTGCCC', style = {'width':'450px', 'height':'160px', 'font-family':'monospace', 'font-size':'large'}), + #html.P('Note: a maximum number of 1000 sequences can be provided'), + dbc.FormText('Note: a maximum number of 1000 sequences can be provided', color = 'secondary') + ], + style = {'width':'450px'} #same as text-area +) +tab_sequence_content = html.Div( + [ + html.P(['Search crRNAs by inserting one or more 
genomic sequences.', html.P('Chromosome ranges can also be supplied')], + style = {'word-wrap': 'break-word'}), + + dcc.Textarea(id = 'text-sequence', placeholder = '>sequence 1\nAAGTCCCAGGACTTCAGAAGagctgtgagaccttggc\n>sequence2\nchr1:11,130,540-11,130,751', style = {'width':'450px', 'height':'160px', 'font-family':'monospace', 'font-size':'large'}), + #html.P('Note: a maximum number of 1000 sequences can be provided'), + dbc.FormText('Note: a maximum number of 1000 characters can be provided', color = 'secondary') + ], + style = {'width':'450px'} #same as text-area +) +final_list.append( + html.Div( + html.Div( + [ + modal, + html.Div( + [ + html.H3('STEP 1', style = {'margin-top':'0'}), + html.P('Select a genome'), + html.Div( + dcc.Dropdown(options = gen_dir, clearable = False, id = "available-genome",) #style = {'width':'75%'}) + ), + dbc.FormText('Note: Genomes enriched with variants are indicated with a \'+\' symbol', color='secondary'), + # html.P('Add a genome variant', style = {'visibility':'hidden'}), + # html.Div( + # dcc.Dropdown(options = var_dir,clearable = False, id = 'available-variant', style = {'width':'75%', 'visibility':'hidden'}) + # ), + html.Div( + [ + html.Div( + [ + html.P('Select PAM'), + html.Div( + dcc.Dropdown(options = pam_file, clearable = False, id = 'available-pam') + ) + ], + style = {'flex':'0 0 50%', 'margin-top': '10%'} + ), + #html.P('or'), + # html.Div( + # [ + # html.P('Insert custom PAM'), + # dcc.Input(type = 'text', id = 'custom-pam', placeholder = 'NGG', disabled = True) + # ] + # ) + ], + id = 'div-pam', + className = 'flex-div-pam' + ), + html.Div( + [ + # html.P(#'Send us a request to add a specific genome sequence or a variant, or download the offline version' + # [html.A('Contact us', href = 'http://127.0.0.1:8050', target="_blank"),' to request new genomes availability in the dropdown list', html.P('or'), html.P('Download the offline version'),], style = {'margin-top':'10px', 'text-align':'-webkit-center', 
'position': 'relative', 'top': '25%'}), + html.Ul( + [html.Li( + [html.A('Contact us', href = 'http://127.0.0.1:8050', target="_blank"),' to request new genomes availability in the dropdown list'], + style = {'margin-top':'5%'} + ), + html.Li( + [html.A('Download', href = 'https://github.com/InfOmics/CRISPRitz'), ' the offline version for more custom parameters'] + ) + ], + style = {'list-style':'inside'} + ), + html.Div( + html.Button('Insert example parameters', id = 'example-parameters', style={'display':'inline-block'}), + style = {'text-align':'center'} + ) + ], + style = {'height':'50%'} + ), + + ], + id = 'step1', + style = {'flex':'0 0 30%', 'tex-align':'center'} + ), + html.Div(style = {'border-right':'solid 1px white'}), + html.Div( + [ + html.H3('STEP 2', style = {'margin-top':'0'}), + html.Div( + [ + html.Div( + [ html.P('Select the input type'), + # dbc.RadioItems( + # options=[ + # {'label': 'Guides', 'value': 'Guides'}, + # {'label': 'Sequence', 'value': 'Sequence'} + # ], + # value='Guides', + # inline = True, + # id = 'guide-or-sequence' + # ), + dbc.Tabs( + [ + dbc.Tab(tab_guides_content, label='Guides', tab_id= 'guide-tab'), + dbc.Tab(tab_sequence_content, label='Sequence', tab_id = 'sequence-tab') + ], + active_tab='guide-tab', + id = 'tabs' + ), + + # html.Div( + # [ + # html.P([ + # 'Insert crRNA sequence(s), one per line.', + # html.P('Sequences must have the same length and be provided without the PAM sequence') , + # #html.Abbr('\uD83D\uDEC8', style = {'text-decoration':'none'} ,title = 'One sequence per line. 
All sequences must have the same lenght and PAM characters are not required') + # ], + # style = {'word-wrap': 'break-word'}), + + # dcc.Textarea(id = 'text-guides', placeholder = 'GAGTCCGAGCAGAAGAAGAA\nCCATCGGTGGCCGTTTGCCC', style = {'width':'450px', 'height':'160px'}), + # #html.P('Note: a maximum number of 1000 sequences can be provided'), + # dbc.FormText('Note: a maximum number of 1000 sequences can be provided', color = 'secondary') + # ], + # style = {'width':'450px'} #same as text-area + # ) + ], + id = 'div-guides' + ), + html.Div( + [ + html.P('Allowed mismatches'), + dcc.Dropdown(options = av_mismatches, clearable = False, id = 'mms', style = {'width':'60px'}), + html.P('Bulge DNA size'), + dcc.Dropdown(options = av_bulges, clearable = False, id = 'dna', style = {'width':'60px'}), + html.P('Bulge RNA size'), + dcc.Dropdown(options = av_bulges, clearable = False, id = 'rna', style = {'width':'60px'}), + dbc.Fade( + [ + html.P('crRNA length (without PAM)'), + dcc.Dropdown(options = av_guide_sequence, clearable = False, id = 'len-guide-sequence-ver', style = {'width':'60px'}) + ], + id = 'fade-len-guide', is_in= False, appear= False + ) + ] + ) + ], + className = 'flex-step2' + ) + + ], + id = 'step2', + style = {'flex':'0 0 40%'} + + ), + html.Div(style = {'border-right':'solid 1px white'}), + html.Div( + [ + html.H3('Advanced Options'), + checklist_div, + dcc.Checklist( + options = [ + #{'label':'Gecko comparison', 'value':'GC', 'disabled':False}, + #{'label':'Reference genome comparison', 'value':'RGC', 'disabled':False}, + {'label':'Notify me by email','value':'email', 'disabled':False}], + id = 'checklist-advanced', + ), + dbc.Fade( + [ + dbc.FormGroup( + [ + dbc.Label("Email", html_for="example-email"), + dbc.Input(type="email", id="example-email", placeholder="Enter email", className='exampleEmail'), + # dbc.FormText( + # "Are you on email? 
You simply have to be these days", + # color="secondary", + # ), + ] + ) + ], + id = 'fade', is_in= False, appear= False + ), + #html.H3('Submit', style = {'margin-top':'0'}), + html.Div( + [ + html.Button('Submit', id = 'check-job'), + html.Button('', id = 'submit-job', style = {'visibility':'hidden'}) + ], + style = {'display':'inline-block', 'margin':'0 auto'} #style="height:55px; width:150px" + ) + ], + id = 'step3', + style = {'tex-align':'center'}, + className = 'flex-step3' + ) + ], + id = 'div-steps', + style = {'margin':'1%'}, + className = 'flex-div-steps' + ), + style = {'background-color':'rgba(154, 208, 150, 0.39)', 'border-radius': '10px', 'border':'1px solid black'}, + id = 'steps-background' + ) +) +index_page = html.Div(final_list, style = {'margin':'1%'}) + +#Load Page +final_list = [] +#final_list.append(html.H1('CRISPRitz Web Application')) +final_list.append( + html.Div( + html.Div( + html.Div( + [ + html.P('Job submitted. Copy this link to view the status and the result page '), + html.Div( + html.P('link', id = 'job-link'), + style = {'border':'2px solid', 'border-color':'blue' ,'width':'70%','display':'inline-block', 'margin':'5px'} + ) + ], + style = {'display':'inline-block'} + ), + style = {'display':'inline-block','background-color':'rgba(154, 208, 150, 0.39)', 'border-radius': '10px', 'border':'1px solid black', 'width':'70%'} + ), + style = {'text-align':'center'} + ) +) + +final_list.append( + html.Div( + [ + html.H4('Status report'), + html.Div( + [ + html.Div( + html.Ul( + [ + html.Li('Searching crRNA'), + html.Li('Annotating result'), + html.Li('Generating report') + ] + ), + style = {'flex':'0 0 20%'} + ), + html.Div( + html.Ul( + [ + html.Li('To do', style = {'color':'red'}, id = 'search-status'), + html.Li('To do', style = {'color':'red'}, id = 'annotate-result-status'), + html.Li('To do', style = {'color':'red'}, id = 'generate-report-status') + ], + style = {'list-style-type':'none'} + ) + ) + ], + className = 'flex-status' + 
), + html.Div( + [ + dcc.Link('View Results', style = {'visibility':'hidden'}, id = 'view-results'), + html.Div(id = 'no-directory-error') + ] + ) + ], + id = 'div-status-report' + ) +) + +final_list.append(html.P('', id = 'done')) + +final_list.append(dcc.Interval(id = 'load-page-check', interval=3*1000)) +load_page = html.Div(final_list, style = {'margin':'1%'}) + + +#Test bootstrap page +final_list = [] +final_list.append( + html.Div( + [ + html.P('Test P', id= 'test-P'), + html.Button('AA', id = 'button-test1'), + html.Button('BB', id = 'button-test2'), + html.Button('CC', id = 'button-test3'), + html.Button('CC10', id = 'button-test10'), + html.Button ('Gen callback', id = 'gen-callback') + ] +) +) +final_list.append(html.Div(id='test-div-for-button')) + +test_page = html.Div(final_list, style = {'margin':'1%'}) +##################################################CALLBACKS################################################## +@app.callback( + Output('test-P', 'children'), [Input('button-test1', 'n_clicks')] +) +def test1(n): + if n is None: + raise PreventUpdate + print('btn1') + return '' + +@app.callback( + Output('test-div-for-button', 'children'), [Input('button-test10', 'n_clicks')] +) +def test1(n): + if n is None: + raise PreventUpdate + print('btn10') + return '' + +################################################# +#Fade in/out email +@app.callback( + Output("fade", "is_in"), + [Input("checklist-advanced", "value")], + [State("fade", "is_in")], +) +def toggle_fade(selected_options, is_in): + if selected_options is None: + return False + if 'email' in selected_options: + return True + return False + +#Insert/Delete example input +@app.callback( + [Output('available-genome', 'value'), + Output('available-pam', 'value'), + Output('text-guides', 'value'), + Output('mms', 'value'), + Output('dna', 'value'), + Output('rna', 'value'), + Output('len-guide-sequence-ver', 'value'), + Output('text-sequence','value')], + [Input('example-parameters', 'n_clicks')] +) 
def inDelExample(n):
    """Fill the search form with a pre-built example (genome, PAM, guide, thresholds, sequence)."""
    if n is None:
        raise PreventUpdate

    # TODO: serve a precomputed example so we do not overload the server
    return gen_dir[0]['value'], '5\'-NGG-3\'', 'GAGTCCGAGCAGAAGAAGAA', '4', '0', '0', '20', '>sequence\nTACCCCAAACGCGGAGGCGCCTCGGGAAGGCGAGGTGGGCAAGTTCAATGCCAAGCGTGACGGGGGA'

# Email validity
@app.callback(
    Output('example-email', 'style'),
    [Input('example-email', 'value')]
)
def checkEmailValidity(val):
    """Color the email input border green when the value contains '@', red otherwise."""
    if val is None:
        raise PreventUpdate

    if '@' in val:
        return {'border': '1px solid #94f033', 'outline': '0'}
    return {'border': '1px solid red'}

# Fade in guide len dropdown for sequence tabs version
@app.callback(
    Output('fade-len-guide', 'is_in'),
    [Input('tabs', 'active_tab')],
    [State('fade-len-guide', 'is_in')]
)
def resetTab(current_tab, is_in):
    """Hide the guide-length dropdown on the guide tab, show it on every other tab."""
    if current_tab is None:
        raise PreventUpdate

    if current_tab == 'guide-tab':
        return False
    return True


# Check input presence
@app.callback(
    [Output('submit-job', 'n_clicks'),
     Output('modal', 'is_open'),
     Output('available-genome', 'className'),
     Output('available-pam', 'className'),
     Output('text-guides', 'style'),
     Output('mms', 'className'),
     Output('dna', 'className'),
     Output('rna', 'className'),
     Output('len-guide-sequence-ver', 'className'),
     Output('warning-list', 'children')],
    [Input('check-job', 'n_clicks'),
     Input('close', 'n_clicks')],
    [State('available-genome', 'value'),
     State('available-pam', 'value'),
     State('text-guides', 'value'),
     State('mms', 'value'),
     State('dna', 'value'),
     State('rna', 'value'),
     State('len-guide-sequence-ver', 'value'),
     State('tabs', 'active_tab'),
     State("modal", "is_open")]
)
def checkInput(n, n_close, genome_selected, pam, text_guides, mms, dna, rna, len_guide_seq, active_tab, is_open):
    """Validate the search form.

    Highlights every missing input with the 'missing-input' class and opens a
    warning modal listing them; when everything is present it triggers the
    submit-job button (n_clicks = 1).
    NOTE: the original code compared strings with `is ''` — replaced with `== ''`
    since identity comparison against a literal is unreliable.
    """
    if n is None:
        raise PreventUpdate
    if is_open is None:
        is_open = False

    classname_red = 'missing-input'
    genome_update = None
    pam_update = None
    text_update = {'width': '450px', 'height': '160px'}
    mms_update = None
    dna_update = None
    rna_update = None
    len_guide_update = None
    update_style = False
    miss_input_list = []

    if genome_selected is None or genome_selected == '':
        genome_update = classname_red
        update_style = True
        miss_input_list.append('Genome')
    if pam is None or pam == '':
        pam_update = classname_red
        update_style = True
        miss_input_list.append('PAM')
    # if text_guides is None or text_guides == '':
    #     text_update = {'width':'450px', 'height':'160px','border': '1px solid red'}
    #     update_style = True
    #     miss_input_list.append('crRNA sequence(s)')
    if mms is None or str(mms) == '':
        mms_update = classname_red
        update_style = True
        miss_input_list.append('Allowed Mismatches')
    if dna is None or str(dna) == '':
        dna_update = classname_red
        update_style = True
        miss_input_list.append('Bulge DNA size')
    if rna is None or str(rna) == '':
        rna_update = classname_red
        update_style = True
        miss_input_list.append('Bulge RNA size')
    if (len_guide_seq is None or str(len_guide_seq) == '') and ('sequence-tab' in active_tab):
        len_guide_update = classname_red
        update_style = True
        miss_input_list.append('crRNA length')
    miss_input = html.Div(
        [
            html.P('The following inputs are missing:'),
            html.Ul([html.Li(x) for x in miss_input_list]),
            html.P('Please fill in the values before submitting the job')
        ]
    )

    if not update_style:
        return 1, False, genome_update, pam_update, text_update, mms_update, dna_update, rna_update, len_guide_update, miss_input
    return None, not is_open, genome_update, pam_update, text_update, mms_update, dna_update, rna_update, len_guide_update, miss_input

# Submit Job, change url
@app.callback(
    [Output('url', 'pathname'),
     Output('url', 'search')],
    [Input('submit-job', 'n_clicks')],
    [State('url', 'href'),
     State('available-genome', 'value'),
     State('available-pam', 'value'),
     State('text-guides', 'value'),
     State('mms', 'value'),
     State('dna', 'value'),
     State('rna', 'value'),
     State('checkbox-gecko', 'checked'),
     State('checkbox-ref-comp', 'checked'),
     State('checklist-advanced', 'value'),
     State('example-email', 'value'),
     State('tabs', 'active_tab'),
     State('text-sequence', 'value'),
     State('len-guide-sequence-ver', 'value')]
)
def changeUrl(n, href, genome_selected, pam, text_guides, mms, dna, rna, gecko_opt, genome_ref_opt, adv_opts, dest_email, active_tab, text_sequence, len_guide_sequence):  # NOTE startJob
    '''
    genome_selected can be Human genome (hg19), or Human Genome (hg19) + 1000 Genome Project, the '+' character defines the ref or enr version.
    Note that pam parameter can be 5'-NGG-3', but the corresponding filename is 5'-NGG-3'.txt
    Pam file (5'-NGG-3'.txt) is structured as NGG 3, or TTTN -4. The created pam.txt inside the result directory add the corresponding N's
    Annotations path file is named genome_name_annotationpath.txt, where genome_name is the reference genome name
    '''
    if n is None:
        raise PreventUpdate

    # Check input, else give simple input (defaults mirror the example search)
    if genome_selected is None or genome_selected == '':
        genome_selected = 'hg19_ref'
    if pam is None or pam == '':
        pam = '5\'-NGG-3\''
    if text_guides is None or text_guides == '':
        text_guides = 'GAGTCCGAGCAGAAGAAGAA'
    else:
        text_guides = text_guides.strip()
        # Cap at 1000 guides and force all guides to the length of the first one
        if len(text_guides.split('\n')) > 1000:
            text_guides = '\n'.join(text_guides.split('\n')[:1000]).strip()
        if not all(len(elem) == len(text_guides.split('\n')[0]) for elem in text_guides.split('\n')):
            text_guides = selectSameLenGuides(text_guides)
    if (len_guide_sequence is None or str(len_guide_sequence) == '') and ('sequence-tab' in active_tab):
        len_guide_sequence = 20
    if (text_sequence is None or text_sequence == '') and ('sequence-tab' in active_tab):
        text_sequence = '>sequence\nTACCCCAAACGCGGAGGCGCCTCGGGAAGGCGAGGTGGGCAAGTTCAATGCCAAGCGTGACGGGGGA'

    # Random 10-char job id; one result directory per job
    job_id = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
    result_dir = 'Results/' + job_id
    # SECURITY NOTE(review): shell=True with interpolated paths throughout this
    # function; job_id is server-generated so it is safe here, but genome/pam
    # values come from the UI and should be validated upstream.
    subprocess.run(['mkdir ' + result_dir], shell=True)

    search_index = True
    search = True
    annotation = True
    report = True
    gecko_comp = False
    ref_comparison = False
    send_email = False
    if adv_opts is None:
        adv_opts = []
    if gecko_opt:
        gecko_comp = True
    if genome_ref_opt:
        ref_comparison = True
    if 'email' in adv_opts and dest_email is not None and len(dest_email.split('@')) > 1 and dest_email.split('@')[-1] != '':
        send_email = True
        with open(result_dir + '/email.txt', 'w') as e:
            e.write(dest_email + '\n')
            e.write('http://127.0.0.1:8050/load?job=' + job_id + '\n')
            e.write(datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S") + '\n')
            e.write('Job done. Parameters: etc etc')

    # Set parameters
    genome_selected = genome_selected.replace(' ', '_')
    genome_ref = genome_selected.split('+')[0]  # + char to separate ref and vcf, eg Human_genome+1000_genome_project
    if genome_ref == genome_selected:
        ref_comparison = False
    # NOTE Indexed genomes names are PAM + _ + bMax + _ + genome_selected

    pam_len = 0
    custom_pam = None

    # Pam file line looks like "NGG 3" (PAM after guide) or "TTTN -4" (PAM before guide)
    with open('pam/' + pam + '.txt') as pam_file:
        pam_char = pam_file.readline()
        index_pam_value = pam_char.split(' ')[-1]
        if int(pam_char.split(' ')[-1]) < 0:
            end_idx = int(pam_char.split(' ')[-1]) * (-1)
            pam_char = pam_char.split(' ')[0][0:end_idx]
            pam_len = end_idx
            pam_begin = True
        else:
            end_idx = int(pam_char.split(' ')[-1])
            pam_char = pam_char.split(' ')[0][end_idx * (-1):]
            pam_len = end_idx
            pam_begin = False

    if 'sequence-tab' in active_tab:
        # Extract sequence and create the guides
        guides = []
        for name_and_seq in text_sequence.split('>'):
            if '' == name_and_seq:
                continue
            name, seq = name_and_seq.strip().split('\n')
            if 'chr' in seq:
                # Entry is a chromosome range, not a raw sequence: fetch it from the genome
                extracted_seq = extract_seq.extractSequence(name, seq, genome_selected.replace(' ', '_'))
            else:
                extracted_seq = seq.strip()
            guides.extend(convert_pam.getGuides(extracted_seq, pam_char, len_guide_sequence))
        text_guides = '\n'.join(guides).strip()

    len_guides = len(text_guides.split('\n')[0])
    if pam_begin:
        pam_to_file = pam_char + ('N' * len_guides) + ' ' + index_pam_value
    else:
        pam_to_file = ('N' * len_guides) + pam_char + ' ' + index_pam_value

    with open(result_dir + '/pam.txt', 'w') as save_pam_file:
        save_pam_file.write(pam_to_file)
    pam = result_dir + '/pam.txt'

    guides_file = result_dir + '/guides.txt'
    if text_guides is not None and text_guides != '':
        # Pad each guide with N's on the PAM side before saving
        with open(guides_file, 'w') as save_guides_file:
            if pam_begin:
                text_guides = 'N' * pam_len + text_guides.replace('\n', '\n' + 'N' * pam_len)
            else:
                text_guides = text_guides.replace('\n', 'N' * pam_len + '\n') + 'N' * pam_len
            save_guides_file.write(text_guides)

    # No bulges -> plain (non-indexed) search; otherwise use the indexed genome
    if int(dna) == 0 and int(rna) == 0:
        search_index = False
    max_bulges = rna
    if int(dna) > int(rna):
        max_bulges = dna

    if search_index:
        search = False

    if int(max_bulges) <= 2:
        genome_idx = pam_char + '_' + '2' + '_' + genome_selected
    else:
        genome_idx = pam_char + '_' + '5' + '_' + genome_selected
    genome_idx_ref = genome_idx.split('+')[0]

    # Create Params.txt file
    with open(result_dir + '/Params.txt', 'w') as p:  # NOTE if modified, change also mms value in update_table function
        p.write('Genome_selected\t' + genome_selected + '\n')
        p.write('Genome_ref\t' + genome_ref + '\n')
        if search_index:
            p.write('Genome_idx\t' + genome_idx + '\n')
        else:
            p.write('Genome_idx\t' + 'None\n')
        p.write('Pam\t' + pam_char + '\n')
        p.write('Max_bulges\t' + str(max_bulges) + '\n')
        p.write('Mismatches\t' + str(mms) + '\n')
        p.write('DNA\t' + str(dna) + '\n')
        p.write('RNA\t' + str(rna) + '\n')
        p.write('Gecko\t' + str(gecko_comp) + '\n')
        p.write('Ref_comp\t' + str(ref_comparison) + '\n')

    # Check if input parameters (mms, bulges, pam, guides, genome) are the same
    # as a previous completed search: if so, reuse its results instead of re-running
    all_result_dirs = [f for f in listdir('Results') if isdir(join('Results', f))]
    all_result_dirs.remove(job_id)
    for check_param_dir in all_result_dirs:
        if os.path.exists('Results/' + check_param_dir + '/Params.txt'):
            if os.path.exists('Results/' + check_param_dir + '/log.txt'):
                with open('Results/' + check_param_dir + '/log.txt') as log:
                    if 'Job\tDone' in log.read():
                        if filecmp.cmp('Results/' + check_param_dir + '/Params.txt', result_dir + '/Params.txt'):
                            guides1 = open('Results/' + check_param_dir + '/guides.txt').read().split('\n')
                            guides2 = open('Results/' + job_id + '/guides.txt').read().split('\n')
                            if collections.Counter(guides1) == collections.Counter(guides2):
                                search = False
                                search_index = False
                                subprocess.run(['cp $PWD/Results/' + check_param_dir + '/' + check_param_dir + '* ' + result_dir + '/'], shell=True)
                                subprocess.run(['cp $PWD/Results/' + check_param_dir + '/*.png ' + result_dir + '/'], shell=True)
                                subprocess.run(['rename \'s/' + check_param_dir + '/' + job_id + '/g\' ' + result_dir + '/*'], shell=True)
                                break

    # Annotation and report are skipped when results were copied from an old job
    if not search and not search_index:
        annotation = False
        report = False

    annotation_filepath = [f for f in listdir('./') if isfile(join('./', f)) and f.startswith(genome_ref)]

    subprocess.Popen(['assets/./submit_job.sh ' + 'Results/' + job_id + ' ' + 'Genomes/' + genome_selected + ' ' + 'Genomes/' + genome_ref + ' ' + 'genome_library/' + genome_idx + (
        ' ' + pam + ' ' + guides_file + ' ' + str(mms) + ' ' + str(dna) + ' ' + str(rna) + ' ' + str(search_index) + ' ' + str(search) + ' ' + str(annotation) + (
            ' ' + str(report) + ' ' + str(gecko_comp) + ' ' + str(ref_comparison) + ' ' + 'genome_library/' + genome_idx_ref + ' ' + str(send_email) + ' ' + annotation_filepath[0]
        )
    )], shell=True)
    return '/load', '?job=' + job_id

# When url changed, load new page
@app.callback(
    [Output('page-content', 'children'),
     Output('job-link', 'children')],
    [Input('url', 'href'), Input('url', 'pathname'), Input('url', 'search')],
    [State('url', 'hash')]
)
def changePage(href, path, search, hash_guide):
    """Route the browser URL to the matching page layout.

    /load -> waiting page, /result -> summary or per-guide page (hash selects
    the guide), /test-page -> test page, anything else -> index.
    """
    print('hash', hash_guide)
    if path == '/load':
        return load_page, 'http://127.0.0.1:8050/load' + search  # NOTE change the url part when DNS are changed
    if path == '/result':
        job_id = search.split('=')[-1]
        if hash_guide is None or hash_guide == '':
            return resultPage(job_id), 'http://127.0.0.1:8050/load' + search
        return guidePage(job_id, hash_guide.split('#')[1]), 'http://127.0.0.1:8050/load' + search
    if path == '/test-page':
        return test_page, 'http://127.0.0.1:8050/load' + search

    return index_page, ''

# Check end job
@app.callback(
    [Output('view-results', 'style'),
     Output('annotate-result-status', 'children'),
     Output('search-status', 'children'),
     Output('generate-report-status', 'children'),
     Output('view-results', 'href'),
     Output('no-directory-error', 'children')],
    [Input('load-page-check', 'n_intervals')],
    [State('url', 'search')]
)
def refreshSearch(n, dir_name):
    """Poll the job's log.txt and update the three status lines; reveal the
    results link once search, annotation and report are all done."""
    if n is None:
        raise PreventUpdate  # TODO do an immediate check so the user does not wait 3 seconds for the first update

    onlydir = [f for f in listdir('Results') if isdir(join('Results', f))]
    current_job_dir = 'Results/' + dir_name.split('=')[-1] + '/'
    if dir_name.split('=')[-1] in onlydir:
        onlyfile = [f for f in listdir(current_job_dir) if isfile(join(current_job_dir, f))]
        if 'log.txt' in onlyfile:
            with open(current_job_dir + 'log.txt') as log:
                all_done = 0
                annotate_res_status = html.P('To do', style={'color': 'red'})
                search_status = html.P('To do', style={'color': 'red'})
                report_status = html.P('To do', style={'color': 'red'})
                current_log = log.read()
                if 'Annotation\tDone' in current_log:
                    annotate_res_status = html.P('Done', style={'color': 'green'})
                    all_done = all_done + 1
                if 'Search-index\tDone' in current_log or 'Search\tDone' in current_log:
                    search_status = html.P('Done', style={'color': 'green'})
                    all_done = all_done + 1
                if 'Report\tDone' in current_log:
                    report_status = html.P('Done', style={'color': 'green'})
                    all_done = all_done + 1
                if all_done == 3:
                    return {'visibility': 'visible'}, annotate_res_status, search_status, report_status, '/result?job=' + dir_name.split('=')[-1], ''
                else:
                    return {'visibility': 'hidden'}, annotate_res_status, search_status, report_status, '', ''
    return {'visibility': 'hidden'}, html.P('Not available', style={'color': 'red'}), html.P('Not available', style={'color': 'red'}), html.P('Not available', style={'color': 'red'}), '', dbc.Alert("The selected result does not exist", color="danger")

# Perform expensive loading of a dataframe and save result into 'global store'
# Cache files are in the Cache directory
@cache.memoize()
def global_store(value):
    """Load the job's scores (preferred) or targets file into a DataFrame,
    normalizing column names; memoized per job id."""
    if value is None:
        return ''
    target = [f for f in listdir('Results/' + value) if isfile(join('Results/' + value, f)) and f.endswith('scores.txt')]
    if not target:
        target = [f for f in listdir('Results/' + value) if isfile(join('Results/' + value, f)) and f.endswith('targets.txt')]

    df = pd.read_csv('Results/' + value + '/' + target[0], sep='\t')
    df.rename(columns={"#Bulge type": 'BulgeType', '#Bulge_type': 'BulgeType', 'Bulge Size': 'BulgeSize', 'Bulge_Size': 'BulgeSize', 'Doench 2016': 'Doench2016', 'Doench_2016': 'Doench2016'}, inplace=True)
    return df

# Send the data when next or prev button is clicked on the result table
@app.callback(
    Output('result-table', 'data'),
    [Input('result-table', "page_current"),
     Input('result-table', "page_size"),
     Input('result-table', "sort_by"),
     Input('result-table', 'filter_query')],
    [State('url', 'search'),
     State('url', 'hash')]
)
def update_table(page_current, page_size, sort_by, filter_expr, search, hash_guide):
    """Serve one page of the per-guide off-target table, applying the user's
    filter expression and a default Mismatches/BulgeSize/CFD sort.

    Fix(review): the None-guard on `search` now runs BEFORE `search` is used;
    the original dereferenced it first. `filter` param renamed: shadowed builtin.
    """
    if search is None:
        raise PreventUpdate
    job_id = search.split('=')[-1]
    job_directory = 'Results/' + job_id + '/'
    guide = hash_guide.split('#')[1]
    value = job_id

    filtering_expressions = filter_expr.split(' && ')
    df = global_store(value)
    dff = df[df['crRNA'] == guide]
    print('len index before', len(dff.index))
    # Default sort: fewest mismatches, then smallest bulge, then best CFD
    sort_by.insert(0, {'column_id': 'Mismatches', 'direction': 'asc'})
    sort_by.insert(1, {'column_id': 'BulgeSize', 'direction': 'asc'})
    sort_by.insert(2, {'column_id': 'CFD', 'direction': 'desc'})
    for filter_part in filtering_expressions:
        col_name, operator, filter_value = split_filter_part(filter_part)

        if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
            # these operators match pandas series operator method names
            dff = dff.loc[getattr(dff[col_name], operator)(filter_value)].sort_values(
                [col['column_id'] for col in sort_by],
                ascending=[col['direction'] == 'asc' for col in sort_by],
                inplace=False)
        elif operator == 'contains':
            dff = dff.loc[dff[col_name].str.contains(filter_value)]
        elif operator == 'datestartswith':
            # this is a simplification of the front-end filtering logic,
            # only works with complete fields in standard format
            dff = dff.loc[dff[col_name].str.startswith(filter_value)]

    print('len index after', len(dff.index))
    if len(sort_by):
        dff = dff.sort_values(
            [col['column_id'] for col in sort_by],
            ascending=[col['direction'] == 'asc' for col in sort_by],
            inplace=False
        )

    # Check if results are not 0 (first data line after the header)
    # NOTE(review): warning_no_res is computed but never returned — preserved as-is
    warning_no_res = ''
    with open(job_directory + job_id + '.targets.txt') as t:
        no_result = False
        t.readline()
        last_line = t.readline()
        if last_line == '' or last_line == '\n':
            no_result = True

    if no_result:
        warning_no_res = dbc.Alert("No results were found with the given parameters", color="warning")

    return dff.iloc[
        page_current * page_size:(page_current + 1) * page_size
    ].to_dict('records')


# For filtering
def split_filter_part(filter_part):
    """Split a Dash filter expression into (column name, operator, value).

    Returns [None, None, None] when no known operator is present.
    """
    for operator_type in operators:
        for operator in operator_type:
            if operator in filter_part:
                name_part, value_part = filter_part.split(operator, 1)
                name = name_part[name_part.find('{') + 1: name_part.rfind('}')]

                value_part = value_part.strip()
                v0 = value_part[0]
                if v0 == value_part[-1] and v0 in ("'", '"', '`'):
                    # Quoted value: strip quotes and unescape
                    value = value_part[1: -1].replace('\\' + v0, v0)
                else:
                    try:
                        value = float(value_part)
                    except ValueError:
                        value = value_part

                # word operators need spaces after them in the filter string,
                # but we don't want these later
                return name, operator_type[0].strip(), value

    return [None] * 3


# Read the uploaded file and converts into bit
def parse_contents(contents):
    """Decode a Dash Upload 'contents' data-URL into raw bytes."""
    content_type, content_string = contents.split(',')

    decoded = base64.b64decode(content_string)
    return decoded

# Show image: Barplot
@app.callback(
    [Output('barplot-img', 'src'),
     Output('link-barplot', 'href')],
    [Input('mms-dropdown', 'value')],
    [State('url', 'search')]
)
def showImages(mms, search):
    """Return the base64 data-URL and link for the mismatch histogram image."""
    if mms is None:
        raise PreventUpdate
    job_id = search.split('=')[-1]
    job_directory = 'Results/' + job_id + '/'
    barplot_img = 'summary_histogram_' + str(mms) + 'mm.png'
    try:  # NOTE avoids errors if the barplot was not generated
        # Fix(review): use a context manager so the image handle is closed
        with open('Results/' + job_id + '/' + barplot_img, 'rb') as img_file:
            barplot_src = 'data:image/png;base64,{}'.format(base64.b64encode(img_file.read()).decode())
        barplot_href = 'assets/Img/' + job_id + '/' + barplot_img
    except Exception:
        barplot_src = ''
        barplot_href = ''
    return barplot_src, barplot_href

# Show image: Radar chart
@app.callback(
    [Output('radar-img', 'src'),
     Output('link-radar', 'href')],
    [Input('mms-dropdown-guide-specific', 'value')],
    [State('url', 'search'), State('url', 'hash')]
)
def showRadarImages(mms, search, hash_guide):
    """Return the base64 data-URL and link for the per-guide radar chart.

    Fix(review): renamed from a second `showImages` definition that silently
    shadowed the barplot callback's function name (Dash registers callbacks via
    the decorator, so the rename does not change behavior).
    """
    if mms is None:
        raise PreventUpdate
    job_id = search.split('=')[-1]
    job_directory = 'Results/' + job_id + '/'
    guide = hash_guide.split('#')[1]
    radar_img = 'summary_single_guide_' + guide + '_' + str(mms) + 'mm.png'
    try:
        with open('Results/' + job_id + '/' + radar_img, 'rb') as img_file:
            radar_src = 'data:image/png;base64,{}'.format(base64.b64encode(img_file.read()).decode())
        radar_href = 'assets/Img/' + job_id + '/' + radar_img
    except Exception:
        radar_src = ''
        radar_href = ''
    return radar_src, radar_href

def generate_table(dataframe, id_table, max_rows=26):
    """Render up to max_rows of a DataFrame as an html.Table component."""
    return html.Table(
        # Header
        [html.Tr([html.Th(col) for col in dataframe.columns])] +
        # Body
        [html.Tr([
            html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
        ]) for i in range(min(len(dataframe), max_rows))],
        style={'display': 'inline-block'},
        id=id_table
    )

# FOR BUTTON IN TABLE
# element.style {
#     background: none;
#     border: none;
#     margin: 0;
#     padding: 0;
#     cursor: pointer;
#     font-family: monospace;
#     font-size: large;
#     font-weight: normal;
# }


# If the input guides are different len, select the ones with same length as the first
def selectSameLenGuides(list_guides):
    """Keep only the guides whose length matches the first guide's length."""
    selected_length = len(list_guides.split('\n')[0])
    same_len_guides_list = []
    for guide in list_guides.split('\n'):
        if len(guide) == selected_length:
            same_len_guides_list.append(guide)
    same_len_guides = '\n'.join(same_len_guides_list).strip()
    return same_len_guides

def resultPage(job_id):
    """Build the result-summary page: per-guide profile table plus the
    reference-genome comparison barplot selector."""
    value = job_id
    job_directory = 'Results/' + job_id + '/'
    warning_message = []
    if not isdir(job_directory):
        return html.Div(dbc.Alert("The selected result does not exist", color="danger"))

    # Load mismatches
    with open('Results/' + value + '/Params.txt') as p:
        mms = (next(s for s in p.read().split('\n') if 'Mismatches' in s)).split('\t')[-1]

    mms = int(mms[0])
    mms_values = [{'label': i, 'value': i} for i in range(mms + 1)]

    col_profile_general = ['Total On-Targets', 'Total Off-Targets']
    for i in range(mms):
        col_profile_general.append(str(i + 1) + ' Mismatches')
    col_type = ['numeric' for i in col_profile_general]

    # Load profile: try the '.profile_complete.xls' (',' separated) first,
    # falling back to '.profile.xls' ('\t' or ',' separated)
    try:
        profile = pd.read_csv('Results/' + value + '/' + value + '.profile_complete.xls')
        if len(profile.columns) == 1:
            profile = pd.read_csv('Results/' + value + '/' + value + '.profile.xls', sep='\t')
    except Exception:
        profile = pd.read_csv('Results/' + value + '/' + value + '.profile.xls', sep='\t')
        if len(profile.columns) == 1:
            profile = pd.read_csv('Results/' + value + '/' + value + '.profile.xls')

    columns_profile_table = [{'name': 'Guide', 'id': 'Guide', 'type': 'text'}, {'name': 'CFD', 'id': 'CFD', 'type': 'numeric'}, {'name': 'Total On-Targets', 'id': 'Total On-Targets', 'type': 'numeric'}, {'name': 'Total Off-targets', 'id': 'Total Off-Targets', 'type': 'numeric'}]
    keep_column = ['GUIDE', 'ONT', 'OFFT']
    for i in range(mms):
        columns_profile_table.append({'name': str(i + 1) + ' Mismatches', 'id': str(i + 1) + ' Mismatches', 'type': 'numeric'})
        keep_column.append(str(i + 1) + 'MM')
    columns_profile_table.append({'name': 'More Info', 'id': 'More Info', 'type': 'text'})
    print(profile.columns)
    profile = profile[keep_column]
    rename_columns = {'GUIDE': 'Guide', "ONT": 'Total On-Targets', 'OFFT': 'Total Off-Targets'}
    for i in range(mms):
        rename_columns[str(i + 1) + 'MM'] = str(i + 1) + ' Mismatches'

    profile.rename(columns=rename_columns, inplace=True)  # Now profile is Guide, Total On-targets, ...
    final_list = []

    final_list.append(
        html.H3('Result Summary')
    )

    # Load acfd for each guide and rescale it to a 0-100 CFD score
    # TODO tidy up and double-check this conversion
    with open('Results/' + value + '/acfd.txt') as a:
        acfd = a.read().strip().split('\n')
        acfd.remove('crRNA 0')
        acfd.sort()
        acfd = [float(line.split(' ')[-1]) for line in acfd]
        acfd = [int(round((100 / (100 + x)) * 100)) for x in acfd]
    profile = profile.sort_values('Guide')
    profile['CFD'] = acfd
    profile = profile.sort_values('CFD', ascending=False)
    final_list.append(
        html.Div(
            dash_table.DataTable(
                id='general-profile-table',
                page_size=PAGE_SIZE,
                columns=columns_profile_table,
                data=profile.to_dict('records')
            ),
            id='div-general-profile-table')
    )

    final_list.append(html.Br())
    final_list.append(
        html.Div(
            [
                html.Div(
                    [
                        html.H5('Comparison with Reference Genome'),
                        html.P('Select the mismatch value'),
                        dcc.Dropdown(options=mms_values, id='mms-dropdown', style={'flex': '0 0 5%', 'width': '100px'}, clearable=False)
                    ]
                ),
                html.Div(
                    html.A(
                        html.Img(id='barplot-img', width="100%"),
                        target="_blank",
                        id='link-barplot'
                    ),
                    style={'flex': '0 0 30%'}
                ),
                html.Div(
                    html.A(
                        html.Img(width="100%"),
                        target="_blank",
                    ),
                    style={'flex': '0 0 30%'}
                ),
            ],
            className='flex-view-images'
        )
    )

    result_page = html.Div(final_list, style={'margin': '1%'})
    return result_page


def guidePage(job_id, guide):
    """Build the per-guide page: paged off-target table (fed by update_table)
    plus the radar-chart selector."""
    value = job_id
    final_list = []
    final_list.append(html.P('List of Targets found for the selected guide'))
    col_list = ['BulgeType', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Direction', 'Mismatches', 'BulgeSize', 'CFD', 'Doench2016']
    # NOTE(review): col_type has 11 entries for 10 columns; zip drops the extra — verify intent
    col_type = ['text', 'text', 'text', 'text', 'numeric', 'text', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric']
    cols = [{"name": i, "id": i, 'type': t} for i, t in zip(col_list, col_type)]
    job_directory = 'Results/' + job_id + '/'
    # Load mismatches
    with open('Results/' + value + '/Params.txt') as p:
        mms = (next(s for s in p.read().split('\n') if 'Mismatches' in s)).split('\t')[-1]

    mms = int(mms[0])
    mms_values = [{'label': i, 'value': i} for i in range(mms + 1)]
    global_store(job_id)  # TODO check whether this loads every time or only the first
    # NOTE the filtering is done automatically when the page is loaded due to the
    # function update_table since it's triggered when the table is created,
    # putting page_current, sort_by etc at initial values

    final_list.append(
        html.Div(
            dash_table.DataTable(
                id='result-table',
                columns=cols,
                virtualization=True,
                fixed_rows={'headers': True, 'data': 0},
                style_cell={'width': '150px'},
                page_current=0,
                page_size=PAGE_SIZE,
                page_action='custom',
                sort_action='custom',
                sort_mode='multi',
                sort_by=[],
                filter_action='custom',
                filter_query='',
                style_table={
                    'height': '300px',
                },
            ),
            id='div-result-table',
        )
    )
    final_list.append(html.Br())
    final_list.append(
        html.Div(
            [
                html.Div(
                    [
                        html.P('Select the mismatch value'),
                        dcc.Dropdown(options=mms_values, id='mms-dropdown-guide-specific', style={'flex': '0 0 5%'}, clearable=False)
                    ]
                ),
                html.Div(
                    html.A(
                        html.Img(id='radar-img', width="100%"),
                        target="_blank",
                        id='link-radar'
                    ),
                    style={'flex': '0 0 30%'}
                ),
                html.Div(
                    html.A(
                        html.Img(width="100%"),
                        target="_blank",
                    ),
                    style={'flex': '0 0 30%'}
                )
            ],
            className='flex-view-images'
        )
    )
    return html.Div(final_list, style={'margin': '1%'})

if __name__ == '__main__':
    app.run_server(debug=True)
    cache.clear()  # delete cache when server is closed

    # BUG: when computing scores, if the targets contain IUPAC characters the
    # terminal can show 150%, 200% etc because the progress cap is based on
    # `wc -l` of the targets, which can grow when many IUPAC chars are present

# ---------------------------------------------------------------------------
# patch file boundary (preserved from the original unified diff):
# diff --git a/OldScripts/app_v5.py b/OldScripts/app_v5.py
# new file mode 100644
# index 0000000..0c1aab7
# --- /dev/null
# +++ b/OldScripts/app_v5.py
# @@ -0,0 +1,2675 @@
# ---------------------------------------------------------------------------

# NEW:
# -sequence input
# -extract guides from sequence
# -General guide result table
# -Specific table with ordered offtargets for each guide
# -Download results

import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
import dash_table
from dash.exceptions import PreventUpdate
from os import listdir  # for getting directories
+from os.path import isfile, isdir,join #for getting directories +import subprocess +import base64 #for decoding upload content +import io #for decoding upload content +import pandas as pd #for dash table +import json #for getting and saving report images list +from os import getcwd +import time #measure time for loading df table +from flask_caching import Cache #for cache of .targets or .scores +import os +import string #for job id +import random #for job id +import sys #for sys.exit() +import filecmp #check if Params files are equals +import dash_bootstrap_components as dbc +import collections #For check if guides are the same in two results +from datetime import datetime #For time when job submitted +from seq_script import extract_seq, convert_pam +import re #For sort chr filter values +#Warning symbol \u26A0 + +PAGE_SIZE = 100 #number of entries in each page of the table in view report +URL = 'http://127.0.0.1:8050' +external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css', dbc.themes.BOOTSTRAP] +app = dash.Dash(__name__, external_stylesheets=external_stylesheets) + +app.title = 'CRISPRitz' +app.config['suppress_callback_exceptions'] = True #necessary if update element in a callback generated in another callback +app.css.config.serve_locally = True +app.scripts.config.serve_locally = True + +CACHE_CONFIG = { + # try 'filesystem' if you don't want to setup redis + 'CACHE_TYPE': 'filesystem', + 'CACHE_DIR': ('Cache')#os.environ.get('REDIS_URL', 'localhost:6379') +} +cache = Cache() +cache.init_app(app.server, config=CACHE_CONFIG) +app_location = os.path.dirname(os.path.abspath(__file__)) + '/' +operators = [['ge ', '>='], + ['le ', '<='], + ['lt ', '<'], + ['gt ', '>'], + ['ne ', '!='], + ['eq ', '='], + ['contains ']] #for filtering + +#Populations 1000 gen proj +population_1000gp = { + 'EAS':['CHB', 'JPT', 'CHS', 'CDX', 'KHV'], + 'EUR':['CEU', 'TSI', 'FIN', 'GBR', 'IBS'], + 'AFR':['YRI', 'LWK', 'GWD', 'MSL', 'ESN', 'ASW', 'ACB'], + 'AMR':['MXL', 
'PUR', 'CLM', 'PEL'], + 'SAS':['GIH', 'PJL', 'BEB', 'STU', 'ITU'] +} + +#Dropdown available genomes +onlydir = [f for f in listdir('Genomes') if isdir(join('Genomes', f))] +onlydir = [x.replace('_', ' ') for x in onlydir] +gen_dir = [] +for dir in onlydir: + gen_dir.append({'label': dir, 'value' : dir}) + +#Dropdown available PAM +onlyfile = [f for f in listdir('pam') if isfile(join('pam', f))] +onlyfile = [x.replace('.txt', '') for x in onlyfile] #removed .txt for better visualization +pam_file = [] +for pam_name in onlyfile: + if 'NGG' in pam_name: #TODO modificare per selezionare solo le PAM disponibili + pam_file.append({'label':pam_name, 'value':pam_name}) + else: + pam_file.append({'label': pam_name, 'value' : pam_name, 'disabled':True}) + + +#Available mismatches and bulges +av_mismatches = [{'label': i, 'value': i} for i in range(0, 8)] +av_bulges = [{'label': i, 'value': i} for i in range(0, 6)] +av_guide_sequence = [{'label': i, 'value': i} for i in range(15, 26)] +search_bar = dbc.Row( + [ + #dbc.Col(dbc.Input(type="search", placeholder="Search")), + dbc.Col(dbc.NavLink('HOME', active = True, href = URL, className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})), + dbc.Col(dbc.NavLink('ABOUT', active = True, href = URL, className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})), + dbc.Col( + dbc.DropdownMenu( + children=[ + dbc.DropdownMenuItem("Github", header=True), + dbc.DropdownMenuItem("InfOmics/CRISPRitz", href='https://github.com/InfOmics/CRISPRitz'), + dbc.DropdownMenuItem("Pinellolab/CRISPRitz", href='https://github.com/pinellolab/CRISPRitz'), + ], + #nav=True, + in_navbar=True, + label="Downloads", + style = {'width': '300px !important' } #'height': '400px !important' + ), + ), + dbc.Col(dbc.NavLink('CONTACTS', active = True, href = URL, className= 'testHover', style = {'text-decoration':'none', 'color':'white', 'font-size':'1.5rem'})) + ], + no_gutters=True, + 
className="ml-auto flex-nowrap mt-3 mt-md-0", + align="center", +) +PLOTLY_LOGO = "https://images.plot.ly/logo/new-branding/plotly-logomark.png" #TODO modificare logo + + +navbar = dbc.Navbar( + [ + html.A( + # Use row and col to control vertical alignment of logo / brand + dbc.Row( + [ + dbc.Col(html.Img(src=PLOTLY_LOGO, height="30px")), + dbc.Col(dbc.NavbarBrand("CRISPRitz Web App", className="ml-2", style = {'font-size': '30px'})) + ], + align="center", + no_gutters=True, + ), + href=URL, + ), + dbc.NavbarToggler(id="navbar-toggler"), + dbc.Collapse(search_bar, id="navbar-collapse", navbar=True), + ], + color="dark", + dark=True, +) + +#For multipage +app.layout = html.Div([ + navbar, + dcc.Location(id='url', refresh=False), + html.Div(id='page-content'), + html.P(id = 'signal', style = {'visibility':'hidden'}) +]) + + + +#Main Page +final_list = [] +final_list.extend([#html.H1('CRISPRitz Web Application'), + html.Div(children=''' + CRISPRitz is a software package containing 5 different tools dedicated to perform predictive analysis and result assessement on CRISPR/Cas experiments. 
+ '''), + html.P()]) + +final_list.append( + html.Div( + [ + html.P(['Download the offline version here: ', html.A('InfOmics/CRISPRitz', href = 'https://github.com/InfOmics/CRISPRitz', target="_blank"), ' or ', html.A('Pinellolab/CRISPRitz', href = 'https://github.com/pinellolab/CRISPRitz', target="_blank") ]) + ] + ) +) +checklist_div = html.Div( + [ + dbc.FormGroup( + [ + dbc.Checkbox( + id="checkbox-gecko", className="form-check-input" + ), + dbc.Label( + #html.P(['Activate Gecko ', html.Abbr('comparison', title ='The results of your test guides will be compared with results obtained from a previous computed analysis on gecko library')]) , + html.P('Compare your results with the Gecko library'), + html_for="checkbox-gecko", + className="form-check-label", + ), + dbc.Checkbox( + id="checkbox-ref-comp", className="form-check-input" + ), + dbc.Label( + #html.P(['Activate Reference genome ', html.Abbr('comparison', title ='The results of your test guides will be compared with the results obtained from a computed analysis on the corresponding reference genome. Note: this may increase computational time')]) , + html.P('Compare your results with the corresponding reference genome'), + html_for="checkbox-ref-comp", + className="form-check-label", + ) + + ], + check = True + ) + ], + id = 'checklist-test-div' +) + +modal = html.Div( + [ + dbc.Modal( + [ + dbc.ModalHeader("WARNING! 
Missing inputs"), + dbc.ModalBody('The following inputs are missing, please select values before submitting the job', id = 'warning-list'), + dbc.ModalFooter( + dbc.Button("Close", id="close" , className="modal-button") + ), + ], + id="modal", + centered=True + ), + ] +) + +tab_guides_content = html.Div( + [ + html.P([ + 'Insert crRNA sequence(s), one per line.', + html.P('Sequences must have the same length and be provided without the PAM sequence', id = 'testP') , + ], + style = {'word-wrap': 'break-word'}), + + dcc.Textarea(id = 'text-guides', placeholder = 'GAGTCCGAGCAGAAGAAGAA\nCCATCGGTGGCCGTTTGCCC', style = {'width':'450px', 'height':'160px', 'font-family':'monospace', 'font-size':'large'}), + dbc.FormText('Note: a maximum number of 1000 sequences can be provided', color = 'secondary') + ], + style = {'width':'450px'} #same as text-area +) +tab_sequence_content = html.Div( + [ + html.P(['Search crRNAs by inserting one or more genomic sequences.', html.P('Chromosome ranges can also be supplied')], + style = {'word-wrap': 'break-word'}), + + dcc.Textarea(id = 'text-sequence', placeholder = '>sequence 1\nAAGTCCCAGGACTTCAGAAGagctgtgagaccttggc\n>sequence2\nchr1:11,130,540-11,130,751', style = {'width':'450px', 'height':'160px', 'font-family':'monospace', 'font-size':'large'}), + #html.P('Note: a maximum number of 1000 sequences can be provided'), + dbc.FormText('Note: a maximum number of 1000 characters can be provided', color = 'secondary') + ], + style = {'width':'450px'} #same as text-area +) +final_list.append( + html.Div( + html.Div( + [ + modal, + html.Div( + [ + + + html.Div([ + html.H3('STEP 1', style = {'margin-top':'0'}), + html.Div([ + html.P([html.Button(html.P("Load example", style={'color':'rgb(46,140,187)','text-decoration-line': 'underline'}), id='example-parameters', + style={ 'border': 'None', 'text-transform': 'capitalize','height':'12','font-weight': '500', 'padding': '0 0px','textcolor':'blu'}), ' - ', + #html.Br(), + 
html.Button(html.P(children="Reset", style={'color':'rgb(46,140,187)','text-decoration-line': 'underline'}), id='remove-parameters', + style={'border': 'None', 'text-transform': 'capitalize','height':'12','font-weight': '500', 'padding': '0 0px'})]) + ]) + ], className = 'flex-div-insert-delete-example'), + + html.P('Select a genome'), + html.Div( + dcc.Dropdown(options = gen_dir, clearable = False, id = "available-genome",) #style = {'width':'75%'}) + ), + dbc.FormText('Note: Genomes enriched with variants are indicated with a \'+\' symbol', color='secondary'), + + html.Div( + [ + html.Div( + [ + html.P('Select PAM'), + html.Div( + dcc.Dropdown(options = pam_file, clearable = False, id = 'available-pam') + ) + ], + style = {'flex':'0 0 50%', 'margin-top': '10%'} + ) + ], + id = 'div-pam', + className = 'flex-div-pam' + ), + html.Div( + [ + html.Ul( + [html.Li( + [html.A('Contact us', href = URL, target="_blank"),' to request new genomes availability in the dropdown list'], + style = {'margin-top':'5%'} + ), + html.Li( + [html.A('Download', href = 'https://github.com/InfOmics/CRISPRitz'), ' the offline version for more custom parameters'] + ) + ], + style = {'list-style':'inside'} + ), + # html.Div( + # html.Button('Insert example parameters', id = 'example-parameters', style={'display':'inline-block'}), + # style = {'text-align':'center'} + # ) + ], + style = {'height':'50%'} + ), + + ], + id = 'step1', + style = {'flex':'0 0 30%', 'tex-align':'center'} + ), + html.Div(style = {'border-right':'solid 1px white'}), + html.Div( + [ + html.H3('STEP 2', style = {'margin-top':'0'}), + html.Div( + [ + html.Div( + [ html.P('Select the input type'), + dbc.Tabs( + [ + dbc.Tab(tab_guides_content, label='Guides', tab_id= 'guide-tab'), + dbc.Tab(tab_sequence_content, label='Sequence', tab_id = 'sequence-tab') + ], + active_tab='guide-tab', + id = 'tabs' + ) + ], + id = 'div-guides' + ), + html.Div( + [ + html.P('Allowed mismatches'), + dcc.Dropdown(options = av_mismatches, 
clearable = False, id = 'mms', style = {'width':'60px'}), + html.P('Bulge DNA size'), + dcc.Dropdown(options = av_bulges, clearable = False, id = 'dna', style = {'width':'60px'}), + html.P('Bulge RNA size'), + dcc.Dropdown(options = av_bulges, clearable = False, id = 'rna', style = {'width':'60px'}), + dbc.Fade( + [ + html.P('crRNA length (without PAM)'), + dcc.Dropdown(options = av_guide_sequence, clearable = False, id = 'len-guide-sequence-ver', style = {'width':'60px'}) + ], + id = 'fade-len-guide', is_in= False, appear= False + ) + ] + ) + ], + className = 'flex-step2' + ) + + ], + id = 'step2', + style = {'flex':'0 0 40%'} + + ), + html.Div(style = {'border-right':'solid 1px white'}), + html.Div( + [ + html.H3('Advanced Options', style = {'margin-top':'0px'}), + checklist_div, + dcc.Checklist( + options = [ + {'label':'Notify me by email','value':'email', 'disabled':False} + ], + id = 'checklist-advanced', + ), + dbc.Fade( + [ + dbc.FormGroup( + [ + dbc.Label("Email", html_for="example-email"), + dbc.Input(type="email", id="example-email", placeholder="Enter email", className='exampleEmail'), + # dbc.FormText( + # "Are you on email? 
You simply have to be these days", + # color="secondary", + # ), + ] + ) + ], + id = 'fade', is_in= False, appear= False + ), + #html.H3('Submit', style = {'margin-top':'0'}), + html.Div( + [ + html.Button('Submit', id = 'check-job', style = {'background-color':'skyblue'}), + html.Button('', id = 'submit-job', style = {'display':'none'}) + ], + style = {'display':'inline-block', 'margin':'0 auto'} #style="height:55px; width:150px" + ) + ], + id = 'step3', + style = {'tex-align':'center'}, + className = 'flex-step3' + ) + ], + id = 'div-steps', + style = {'margin':'1%'}, + className = 'flex-div-steps' + ), + style = {'background-color':'rgba(154, 208, 150, 0.39)', 'border-radius': '10px', 'border':'1px solid black'}, + id = 'steps-background' + ) +) +index_page = html.Div(final_list, style = {'margin':'1%'}) + +#Load Page +final_list = [] +final_list.append( + html.Div( + html.Div( + html.Div( + [ + html.P('Job submitted. Copy this link to view the status and the result page '), + html.Div( + html.P('link', id = 'job-link'), + style = {'border':'2px solid', 'border-color':'blue' ,'width':'70%','display':'inline-block', 'margin':'5px'} + ) + ], + style = {'display':'inline-block'} + ), + style = {'display':'inline-block','background-color':'rgba(154, 208, 150, 0.39)', 'border-radius': '10px', 'border':'1px solid black', 'width':'70%'} + ), + style = {'text-align':'center'} + ) +) + +final_list.append( + html.Div( + [ + html.H4('Status report'), + html.Div( + [ + html.Div( + html.Ul( + [ + html.Li('Searching crRNA'), + html.Li('Annotating result'), + html.Li('Generating report') + ] + ), + style = {'flex':'0 0 20%'} + ), + html.Div( + html.Ul( + [ + html.Li('To do', style = {'color':'red'}, id = 'search-status'), + html.Li('To do', style = {'color':'red'}, id = 'annotate-result-status'), + html.Li('To do', style = {'color':'red'}, id = 'generate-report-status') + ], + style = {'list-style-type':'none'} + ) + ) + ], + className = 'flex-status' + ), + html.Div( + [ + 
dcc.Link('View Results', style = {'visibility':'hidden'}, id = 'view-results'), + html.Div(id = 'no-directory-error') + ] + ) + ], + id = 'div-status-report' + ) +) + +final_list.append(html.P('', id = 'done')) + +final_list.append(dcc.Interval(id = 'load-page-check', interval=3*1000)) +load_page = html.Div(final_list, style = {'margin':'1%'}) + + +#Test bootstrap page, go to /test-page to see +final_list = [] +# final_list.append(html.P('List of Targets found for the selected guide')) + +# df = pd.read_csv('esempio_tabella_cluster.txt', sep = '\t') +# exp_col = [] +# close_col = [] +# status_col = [] #'Top1' or 'Subcluster' +# df.drop( df[df['top'] == 's'].index, inplace = True) + +# for i in range (df.shape[0]): +# exp_col.append('+') +# close_col.append('-') +# status_col.append('Top1') +# df['Open'] = exp_col +# df['Close'] = close_col +# df['Status'] = status_col +# print('columns:', df.columns) +# final_list.append(dash_table.DataTable( +# id='table-expand', +# columns=[{"name": i, "id": i} for i in df.columns[:]], +# data=df.to_dict('records'), +# row_selectable = 'multi', +# style_data_conditional = [{ +# 'if': { +# 'filter_query': '{Variant unique} eq y', +# #'column_id' :'{#Bulge type}', +# #'column_id' :'{Total}' +# }, +# #'border-left': '5px solid rgba(255, 26, 26, 0.9)', +# 'background-color':'rgba(230, 0, 0,0.65)'#'rgb(255, 102, 102)' + +# }] +# )) +final_list.append(html.Div(id='test-div-for-button')) +test_page = html.Div(final_list, style = {'margin':'1%'}) + +#TEST PAGE 2 +final_list = [] +# final_list.append(html.P('List of Targets found for the selected guide')) +# final_list.append(html.P('Select a row to view the corresponding cluster')) +# df_example = pd.read_csv('esempio_tabella_cluster.txt', sep = '\t') +# print('TABELLA CLUSTER', df_example) +# #df_example.drop( df_example[df_example['top'] == 's'].index, inplace = True) +# final_list.append(dash_table.DataTable( +# id='double-table-one', +# columns=[{"name": i, "id": i} for i in 
df_example.columns[:-2]], +# data= df_example.loc[df_example['top'] == 't'].to_dict('records'), #df_example.to_dict('records'), +# css= [{ 'selector': 'td.cell--selected, td.focused', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;' }, { 'selector': 'td.cell--selected *, td.focused *', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;'}], + +# style_data_conditional = [{ +# 'if': { +# 'filter_query': '{Variant unique} eq y', +# #'column_id' :'{#Bulge type}', +# #'column_id' :'{Total}' +# }, +# #'border-left': '5px solid rgba(255, 26, 26, 0.9)', +# 'background-color':'rgba(255, 0, 0,0.15)'#'rgb(255, 102, 102)' + +# }] +# )) +# final_list.append(html.P()) +# final_list.append(html.P()) +# final_list.append(html.Div( +# dash_table.DataTable( +# id = 'double-table-two', +# #columns=[{"name": i, "id": i} for i in df_example.columns[:]], +# #fixed_rows = {'headers' : True, 'data': 0}, +# page_current=0, +# page_size=PAGE_SIZE, +# page_action='custom', +# virtualization = True, +# css= [{ 'selector': 'td.cell--selected, td.focused', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;' }, { 'selector': 'td.cell--selected *, td.focused *', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;'}], + +# style_data_conditional = [{ +# 'if': { +# 'filter_query': '{Variant unique} eq y', +# #'column_id' :'{#Bulge type}', +# #'column_id' :'{Total}' +# }, +# #'border-left': '5px solid rgba(255, 26, 26, 0.9)', +# 'background-color':'rgba(255, 0, 0,0.15)'#'rgb(255, 102, 102)' + +# }, +# { +# 'if': { +# 'filter_query': '{top} eq t', +# # 'column_id' :'BulgeType' +# }, +# # 'border-left': '5px solid rgba(26, 26, 255, 0.9)', +# 'font-weight':'bold' + + +# } + +# ] +# ), +# id = 'div-test-page2-second-table' +# ) +# ) +test_page2 = html.Div(final_list, style = {'margin':'1%'}) + +##################################################CALLBACKS################################################## +#Test callbacks + +# #Callback for test-page2 +# @app.callback( 
+# [Output('double-table-two', 'data'), +# Output('double-table-two', 'columns')], +# [Input('double-table-one', 'active_cell')], +# [State('double-table-one', 'data')] +# ) +# def loadSecondTable(active_cell, data): +# if active_cell is None: +# raise PreventUpdate + +# id_selected = data[active_cell['row']]['id'] + +# return df_example.loc[df_example['id'] == id_selected].to_dict('records'), [{"name": i, "id": i} for i in list(data[0].keys())[:-2]] + + + + +# #IDEA aggiungo colonna che mi indica se è top1 o solo parte del cluster, se l'utente clicca su +, faccio vedere anche quelle corrispondenti a quel cluster +# @app.callback( +# Output('table-expand', 'data'), +# #[Input('table-expand', 'active_cell')], +# [Input('table-expand','selected_rows')], +# [State('table-expand', 'data'), +# State('table-expand', 'selected_row_ids')] +# ) +# def expand(active_cell, data, sri): #Callback for test-page +# if active_cell is None: +# raise PreventUpdate + +# #df = pd.DataFrame(data) +# df = pd.read_csv('esempio_tabella_cluster.txt', sep = '\t') +# print('Sel row', active_cell) + +# print('Sel row id', sri) +# print('Data', data) +# df.drop( df[(df['top'] == 's') & (~( df['id'].isin(sri)))].index, inplace = True) +# # for i in range (df.shape[0]): +# # exp_col.append('+') +# # close_col.append('-') +# # status_col.append('Top1') +# # df['Open'] = exp_col +# # df['Close'] = close_col +# # df['Status'] = status_col +# print('df',df) +# return df.to_dict('records') +# # if active_cell['column_id'] == 'Open': +# # if df.iat[active_cell['row'], -1] == 'Top1': +# # df.iat[active_cell['row'], -1] = 'Subcluster' +# # df.loc[-1] = 'n' +# # return df.to_dict('records') +# # else: +# # raise PreventUpdate +# # elif active_cell['column_id'] == 'Close': +# # if df.iat[active_cell['row'], -1] == 'Subcluster': +# # df.iat[active_cell['row'], -1] = 'Top1' +# # df.drop(df.tail(1).index,inplace=True) +# # return df.to_dict('records') +# # raise PreventUpdate + + 
#################################################
# Fade in/out email input
@app.callback(
    Output("fade", "is_in"),
    [Input("checklist-advanced", "value")],
    [State("fade", "is_in")],
)
def toggle_fade(selected_options, is_in):
    '''
    Show the email input box when the "Notify me by email" option is selected,
    hide it otherwise.
    '''
    if selected_options is None:
        return False
    if 'email' in selected_options:
        return True
    return False

# Insert/Delete example input
@app.callback(
    [Output('available-genome', 'value'),
     Output('available-pam', 'value'),
     Output('text-guides', 'value'),
     Output('mms', 'value'),
     Output('dna', 'value'),
     Output('rna', 'value'),
     Output('len-guide-sequence-ver', 'value'),
     Output('text-sequence', 'value')],
    [Input('example-parameters', 'n_clicks_timestamp'),
     Input('remove-parameters', 'n_clicks_timestamp')]
)
def inExample(nI, nR):
    '''
    "Load example" fills the form with example input values; "Reset" clears
    every field (it does not clear the Advanced Options checkboxes).
    The most recently clicked button wins (larger n_clicks_timestamp).
    '''
    if (nI is None) and (nR is None):
        raise PreventUpdate

    if nI is None:
        nI = 0
    if nR is None:
        nR = 0

    if nI > nR:
        return (gen_dir[0]['value'], '5\'-NGG-3\'',
                'GAGTCCGAGCAGAAGAAGAA\nCCATCGGTGGCCGTTTGCCC',
                '4', '0', '0', '20',
                '>sequence\nTACCCCAAACGCGGAGGCGCCTCGGGAAGGCGAGGTGGGCAAGTTCAATGCCAAGCGTGACGGGGGA')

    if nR > nI:
        return '', '', '', '', '', '', '', ''

    # Equal (non-None) timestamps: returning None implicitly would make Dash
    # raise an invalid-callback-output error, so explicitly prevent the update.
    raise PreventUpdate

    # TODO: keep a pre-computed special directory in Results for this example
    # search so the server is not filled with identical example jobs (the
    # ready-made directory is already handed to the user).

# Email validity
@app.callback(
    Output('example-email', 'style'),
    [Input('example-email', 'value')]
)
def checkEmailValidity(val):
    '''
    Minimal email validation: turn the input border green when the value
    contains an '@', red otherwise.
    '''
    if val is None:
        raise PreventUpdate

    if '@' in val:
        return {'border': '1px solid #94f033', 'outline': '0'}
    return {'border': '1px solid red'}

# Fade in guide len dropdown for sequence tabs version
@app.callback(
    Output('fade-len-guide', 'is_in'),
    [Input('tabs', 'active_tab')],
    [State('fade-len-guide', 'is_in')]
)
def resetTab(current_tab, is_in):
    '''
    Show the guide-length dropdown only when the Sequence tab is selected,
    hide it for the Guides tab.
    '''
    if current_tab is None:
        raise PreventUpdate

    if current_tab == 'guide-tab':
        return False
    return True


# Check input presence
@app.callback(
    [Output('submit-job', 'n_clicks'),
     Output('modal', 'is_open'),
     Output('available-genome', 'className'),
     Output('available-pam', 'className'),
     Output('text-guides', 'style'),
     Output('mms', 'className'),
     Output('dna', 'className'),
     Output('rna', 'className'),
     Output('len-guide-sequence-ver', 'className'),
     Output('warning-list', 'children')],
    [Input('check-job', 'n_clicks'),
     Input('close', 'n_clicks')],
    [State('available-genome', 'value'),
     State('available-pam', 'value'),
     State('text-guides', 'value'),
     State('mms', 'value'),
     State('dna', 'value'),
     State('rna', 'value'),
     State('len-guide-sequence-ver', 'value'),
     State('tabs', 'active_tab'),
     State("modal", "is_open")]
)
def checkInput(n, n_close, genome_selected, pam, text_guides, mms, dna, rna, len_guide_seq, active_tab, is_open):
    '''
    Validate the form when Submit is clicked: every input except the guide and
    sequence text areas must have a value. Missing fields get a red border
    (class 'missing-input') and are listed in a warning modal. The callback
    also fires when the user clicks Close in the modal, in which case the
    warning is toggled off. When everything is present, the hidden submit-job
    button is "clicked" (n_clicks = 1) to trigger the real submission.
    NOTE: comparisons use == (the original `x is ''` identity checks are
    unreliable and raise SyntaxWarning on modern Python).
    '''
    if n is None:
        raise PreventUpdate
    if is_open is None:
        is_open = False

    classname_red = 'missing-input'
    genome_update = None
    pam_update = None
    text_update = {'width': '450px', 'height': '160px'}
    mms_update = None
    dna_update = None
    rna_update = None
    len_guide_update = None
    update_style = False
    miss_input_list = []

    if genome_selected is None or genome_selected == '':
        genome_update = classname_red
        update_style = True
        miss_input_list.append('Genome')
    if pam is None or pam == '':
        pam_update = classname_red
        update_style = True
        miss_input_list.append('PAM')
    if mms is None or str(mms) == '':
        mms_update = classname_red
        update_style = True
        miss_input_list.append('Allowed Mismatches')
    if dna is None or str(dna) == '':
        dna_update = classname_red
        update_style = True
        miss_input_list.append('Bulge DNA size')
    if rna is None or str(rna) == '':
        rna_update = classname_red
        update_style = True
        miss_input_list.append('Bulge RNA size')
    # The guide length is mandatory only when searching from a sequence.
    if (len_guide_seq is None or str(len_guide_seq) == '') and ('sequence-tab' in active_tab):
        len_guide_update = classname_red
        update_style = True
        miss_input_list.append('crRNA length')
    miss_input = html.Div(
        [
            html.P('The following inputs are missing:'),
            html.Ul([html.Li(x) for x in miss_input_list]),
            html.P('Please fill in the values before submitting the job')
        ]
    )

    if not update_style:
        return 1, False, genome_update, pam_update, text_update, mms_update, dna_update, rna_update, len_guide_update, miss_input
    return None, not is_open, genome_update, pam_update, text_update, mms_update, dna_update, rna_update, len_guide_update, miss_input

#Submit Job, change url
+@app.callback( + [Output('url', 'pathname'), + Output('url','search')], + [Input('submit-job','n_clicks')], + [State('url', 'href'), + State('available-genome', 'value'), + State('available-pam','value'), + State('text-guides', 'value'), + State('mms','value'), + State('dna','value'), + State('rna','value'), + State('checkbox-gecko','checked'), + State('checkbox-ref-comp', 'checked'), + State('checklist-advanced', 'value'), + State('example-email','value'), + State('tabs','active_tab'), + State('text-sequence','value'), + State('len-guide-sequence-ver', 'value')] +) +def changeUrl(n, href, genome_selected, pam, text_guides, mms, dna, rna, gecko_opt, genome_ref_opt, adv_opts,dest_email, active_tab, text_sequence, len_guide_sequence): #NOTE startJob + ''' + genome_selected can be Human genome (hg19), or Human Genome (hg19) + 1000 Genome Project, the '+' character defines the ref or enr version. + Note that pam parameter can be 5'-NGG-3', but the corresponding filename is 5'-NGG-3'.txt + Pam file (5'-NGG-3'.txt) is structured as NGG 3, or TTTN -4. The created pam.txt inside the result directory add the corresponding N's + Annotations path file is named genome_name_annotationpath.txt, where genome_name is the reference genome name + + La funzione crea una cartella dal nome random per identificare il job, controlla che opzioni sono state aggiunte e salva il contatto della mail. + Estrae i parametri dati in input per poterli utilizzare con crispritz. Salva un file Params.txt con i parametri della ricerca. Controlla poi se + un'altra ricerca è stata fatta con gli stessi parametri e nel caso copia i risultati nella cartella di questo job. + Fa partire lo script submit_job per eseguire crispritz. 
+ ''' + if n is None: + raise PreventUpdate + + #Check input, else give simple input + if genome_selected is None or genome_selected is '': + genome_selected = 'hg19_ref' + if pam is None or pam is '': + pam = '5\'-NGG-3\'' + if text_guides is None or text_guides is '': + text_guides = 'GAGTCCGAGCAGAAGAAGAA\nCCATCGGTGGCCGTTTGCCC' + else: + text_guides = text_guides.strip() + if len(text_guides.split('\n')) > 1000: + text_guides = '\n'.join(text_guides.split('\n')[:1000]).strip() + if ( not all(len(elem) == len(text_guides.split('\n')[0]) for elem in text_guides.split('\n'))): + text_guides = selectSameLenGuides(text_guides) + if (len_guide_sequence is None or str(len_guide_sequence) is '') and ('sequence-tab' in active_tab): + len_guide_sequence = 20 + if (text_sequence is None or text_sequence is '') and ('sequence-tab' in active_tab): + text_sequence = '>sequence\nTACCCCAAACGCGGAGGCGCCTCGGGAAGGCGAGGTGGGCAAGTTCAATGCCAAGCGTGACGGGGGA' + + job_id = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 10)) + result_dir = 'Results/' + job_id + subprocess.run(['mkdir ' + result_dir], shell = True) + + search_index = True + search = True + annotation = True + report = True + gecko_comp = False + ref_comparison = False + send_email = False + if adv_opts is None: + adv_opts = [] + if gecko_opt: + gecko_comp = True + if genome_ref_opt: + ref_comparison = True + if 'email' in adv_opts and dest_email is not None and len(dest_email.split('@')) > 1 and dest_email.split('@')[-1] is not '': + send_email = True + with open(result_dir + '/email.txt', 'w') as e: + e.write(dest_email + '\n') + e.write(URL + '/load?job=' + job_id + '\n') + e.write(datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S") + '\n') + e.write('Job done. 
Parameters: etc etc') + e.close() + + + #Set parameters + genome_selected = genome_selected.replace(' ', '_') + genome_ref = genome_selected.split('+')[0] #+ char to separate ref and vcf, eg Human_genome+1000_genome_project + if genome_ref == genome_selected: + ref_comparison = False + #NOTE Indexed genomes names are PAM + _ + bMax + _ + genome_selected + + pam_len = 0 + custom_pam = None + + with open('pam/' + pam + '.txt') as pam_file: + pam_char = pam_file.readline() + index_pam_value = pam_char.split(' ')[-1] + if int(pam_char.split(' ')[-1]) < 0: + end_idx = int(pam_char.split(' ')[-1]) * (-1) + pam_char = pam_char.split(' ')[0][0 : end_idx] + pam_len = end_idx + pam_begin = True + else: + end_idx = int(pam_char.split(' ')[-1]) + pam_char = pam_char.split(' ')[0][end_idx * (-1):] + pam_len = end_idx + pam_begin = False + + if 'sequence-tab' in active_tab: + #Extract sequence and create the guides + guides = [] + for name_and_seq in text_sequence.split('>'): + if '' == name_and_seq: + continue + name, seq = name_and_seq.strip().split('\n') + if 'chr' in seq: + extracted_seq = extract_seq.extractSequence(name, seq, genome_selected.replace(' ', '_')) + else: + extracted_seq = seq.strip() + guides.extend(convert_pam.getGuides(extracted_seq, pam_char, len_guide_sequence)) + text_guides = '\n'.join(guides).strip() + + + len_guides = len(text_guides.split('\n')[0]) + if (pam_begin): + pam_to_file = pam_char + ('N' * len_guides) + ' ' + index_pam_value + else: + pam_to_file = ('N' * len_guides) + pam_char + ' ' + index_pam_value + + save_pam_file = open(result_dir + '/pam.txt', 'w') + save_pam_file.write(pam_to_file) + save_pam_file.close() + pam = result_dir + '/pam.txt' + + guides_file = result_dir + '/guides.txt' + if text_guides is not None and text_guides is not '': + save_guides_file = open(result_dir + '/guides.txt', 'w') + if (pam_begin): + text_guides = 'N' * pam_len + text_guides.replace('\n', '\n' + 'N' * pam_len) + else: + text_guides = 
text_guides.replace('\n', 'N' * pam_len + '\n') + 'N' * pam_len + save_guides_file.write(text_guides) + save_guides_file.close() + + if (int(dna) == 0 and int(rna) == 0): + search_index = False + max_bulges = rna + if (int(dna) > int(rna)): + max_bulges = dna + + if (search_index): + search = False + + + + genome_idx = pam_char + '_' + '5' + '_' + genome_selected #TODO CUSTOM: modificare per la versione UI offline + genome_idx_ref = genome_idx.split('+')[0] + + #Create Params.txt file + with open(result_dir + '/Params.txt', 'w') as p: #NOTE if modified, chenge also mms value in update_table function + p.write('Genome_selected\t' + genome_selected + '\n') + p.write('Genome_ref\t' + genome_ref + '\n') + if search_index: + p.write('Genome_idx\t' + genome_idx + '\n') + else: + p.write('Genome_idx\t' + 'None\n') + p.write('Pam\t' + pam_char + '\n') + p.write('Max_bulges\t' + str(max_bulges) + '\n') + p.write('Mismatches\t' + str(mms) + '\n') + p.write('DNA\t' + str(dna) + '\n') + p.write('RNA\t' + str(rna) + '\n') + p.write('Gecko\t' + str(gecko_comp) + '\n') + p.write('Ref_comp\t' + str(ref_comparison) + '\n') + p.close() + + #Check if input parameters (mms, bulges, pam, guides, genome) are the same as a previous search #TODO per migliorare, semplicemente modificare il job id in quello della cartella con i ris già fatti + all_result_dirs = [f for f in listdir('Results') if isdir(join('Results', f))] + all_result_dirs.remove(job_id) + #all_result_dirs.remove('test') + for check_param_dir in all_result_dirs: + if os.path.exists('Results/' + check_param_dir + '/Params.txt'): + if os.path.exists('Results/' + check_param_dir + '/log.txt'): + with open('Results/' + check_param_dir + '/log.txt') as log: + if ('Job\tDone' in log.read()): + if (filecmp.cmp('Results/' + check_param_dir + '/Params.txt', result_dir + '/Params.txt' )): + guides1 = open('Results/' + check_param_dir + '/guides.txt').read().split('\n') + guides2 = open('Results/' + job_id + 
'/guides.txt').read().split('\n') + if (collections.Counter(guides1) == collections.Counter(guides2)): + search = False + search_index = False + subprocess.run(['cp $PWD/Results/' + check_param_dir + '/' + check_param_dir + '* ' + result_dir + '/'], shell = True) + subprocess.run(['cp $PWD/Results/' + check_param_dir + '/*.png ' + result_dir + '/'], shell = True) + subprocess.run(['rename \'s/' + check_param_dir + '/' + job_id + '/g\' ' + result_dir + '/*'], shell = True) + break + + #Annotation + if (not search and not search_index): + annotation = False + + #Generate report + if (not search and not search_index): + report = False + + #TODO aggiungere annotazioni per ogni genoma + annotation_file = [f for f in listdir('annotations/') if isfile(join('annotations/', f)) and f.startswith(genome_ref)] + + genome_type = 'ref' #Indicates if search is 'ref', 'var' or 'both' + if '+' in genome_selected: + genome_type = 'var' + if ref_comparison: + genome_type = 'both' + subprocess.Popen(['assets/./submit_job.sh ' + 'Results/' + job_id + ' ' + 'Genomes/' + genome_selected + ' ' + 'Genomes/' + genome_ref + ' ' + 'genome_library/' + genome_idx + ( + ' ' + pam + ' ' + guides_file + ' ' + str(mms) + ' ' + str(dna) + ' ' + str(rna) + ' ' + str(search_index) + ' ' + str(search) + ' ' + str(annotation) + ( + ' ' + str(report) + ' ' + str(gecko_comp) + ' ' + str(ref_comparison) + ' ' + 'genome_library/' + genome_idx_ref + ' ' + str(send_email) + ' ' + 'annotations/' + annotation_file[0] + + ' ' + genome_type + ) + )], shell = True) + return '/load','?job=' + job_id + +#When url changed, load new page +@app.callback( + [Output('page-content', 'children'), + Output('job-link', 'children')], + [Input('url', 'href'), Input('url','pathname'), Input('url','search')],[State('url','hash')] + # [State('url','pathname'), + # State('url','search')] +) +def changePage( href, path, search, hash_guide): + ''' + Controllo della pagina da mostrare in base all'url + ''' + # print('href', href) + # 
print('hash', hash_guide) + # print('pathname', path) + # print('search', search) + #print('hash', hash_guide) + if path == '/load': + return load_page, URL + '/load' + search + if path == '/result': + job_id = search.split('=')[-1] + if hash_guide is None or hash_guide is '': + return resultPage(job_id), URL + '/load' + search + if 'new' in hash_guide: #TODO cambiare nome alla pagina delle guide + return guidePagev3(job_id, hash_guide.split('#')[1]), URL + '/load' + search + if '-Sample-' in hash_guide: + return samplePage(job_id, hash_guide.split('#')[1]), URL + '/load' + search + if '-Pos-' in hash_guide: + return clusterPage(job_id, hash_guide.split('#')[1]), URL + '/load' + search + return guidePagev2(job_id, hash_guide.split('#')[1]), URL + '/load' + search #TODO sistemare pagina di default + if path == '/test-page': + return test_page, URL + '/load' + search + if path == '/test-page2': + return test_page2, URL + '/load' + search + + return index_page, '' + +#Check end job +@app.callback( + [Output('view-results', 'style'), + Output('annotate-result-status', 'children'), + Output('search-status', 'children'), + Output('generate-report-status', 'children'), + Output('view-results','href'), + Output('no-directory-error', 'children')], + [Input('load-page-check', 'n_intervals')], + [State('url', 'search')] +) +def refreshSearch(n, dir_name): + ''' + Il componente Interval chiama questa funzione ogni 3 secondi. Essa controlla lo stato del lavoro e aggiorna la pagina se una parte del lavoro + è stata fatta. 
+ Quando la ricerca è finita, visualizza un link per passare alla pagina dei risultati + Se il job non esiste, ritorna un avviso di errore + TODO sarebbe più comodo che automaticamente la pagina si reindirizzi ai risultati quando il job è fatto + + ''' + if n is None: + raise PreventUpdate + + onlydir = [f for f in listdir('Results') if isdir(join('Results', f))] + current_job_dir = 'Results/' + dir_name.split('=')[-1] + '/' + if dir_name.split('=')[-1] in onlydir: + onlyfile = [f for f in listdir(current_job_dir) if isfile(join(current_job_dir, f))] + if 'log.txt' in onlyfile: + with open(current_job_dir + 'log.txt') as log: + all_done = 0 + annotate_res_status = html.P('To do', style = {'color':'red'}) + search_status = html.P('To do', style = {'color':'red'}) + report_status = html.P('To do', style = {'color':'red'}) + current_log = log.read() + if ('Annotation\tDone' in current_log): + annotate_res_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if ('Search-index\tDone' in current_log and 'Search\tDone' in current_log): + search_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if ('Report\tDone' in current_log): + report_status = html.P('Done', style = {'color':'green'}) + all_done = all_done + 1 + if all_done == 3: + return {'visibility':'visible'}, annotate_res_status, search_status, report_status, '/result?job=' + dir_name.split('=')[-1], '' + else: + return {'visibility':'hidden'}, annotate_res_status, search_status, report_status,'', '' + return {'visibility':'hidden'}, html.P('Not available', style = {'color':'red'}), html.P('Not available', style = {'color':'red'}), html.P('Not available', style = {'color':'red'}), '', dbc.Alert("The selected result does not exist", color = "danger") + +#Perform expensive loading of a dataframe and save result into 'global store' +#Cache are in the Cache directory +@cache.memoize() +def global_store(value): + ''' + Caching dei file targets per una miglior 
performance di visualizzazione + ''' + if value is None: + return '' + target = [f for f in listdir('Results/' + value) if isfile(join('Results/'+value, f)) and f.endswith('scores.txt') ] + if not target: + target = [f for f in listdir('Results/' + value) if isfile(join('Results/'+value, f)) and f.endswith('targets.txt') ] + + df = pd.read_csv('Results/' +value + '/' + target[0], sep = '\t') + df.rename(columns = {"#Bulge type":'BulgeType', '#Bulge_type':'BulgeType','Bulge Size': 'BulgeSize', 'Bulge_Size': 'BulgeSize', 'Doench 2016':'Doench2016','Doench_2016':'Doench2016'}, inplace = True) + return df + +# #Callback to populate the tab, note that it's called when the result_page is loaded (dash implementation), so we do not use raise update to block this first callback +# @app.callback( +# [Output('signal','children'), +# Output('result-table','page_current'), +# Output('result-table', "sort_by"), +# Output('result-table','filter_query')], +# [Input('url', 'pathname')], +# [State('url', 'search')] +# ) +# def populateTable(pathname, search): +# print('pathname', pathname) +# if pathname != '/result': +# raise PreventUpdate + +# job_id = search.split('=')[-1] +# job_directory = 'Results/' + job_id + '/' +# print('job dir', job_directory) +# if(not isdir(job_directory)): +# return 'not_exists', 0, [], '' +# #global_store(job_id) +# print('ok') +# return job_id, 0, [], '' + +#Send the data when next or prev button is clicked on the result table +@app.callback( + Output('result-table', 'data'), + [Input('result-table', "page_current"), + Input('result-table', "page_size"), + Input('result-table', "sort_by"), + Input('result-table', 'filter_query')], + [State('url', 'search'), + State('url', 'hash')] +) +def update_table(page_current, page_size, sort_by, filter, search, hash_guide): + ''' + La funzione ritorna uno split dei risultati in base ad un filtering o a un sort da parte dell'utente. 
Inoltre aggiorna i risultati + visualizzati quando il bottone next page / prev page è cliccato. (Codice preso dalla pagina dash datatable sul sorting con python) + Inoltre carica i file targets, o scores se presente, e lo trasforma in un dataframe, cambiando il nome delle colonne per farle corrispondere + all'id delle colonne della tabella nella pagina. + Se non ci sono targets ritorna un avviso di errore + ''' + job_id = search.split('=')[-1] + job_directory = 'Results/' + job_id + '/' + guide = hash_guide.split('#')[1] + value = job_id + if search is None: + raise PreventUpdate + + filtering_expressions = filter.split(' && ') + #filtering_expressions.append(['{crRNA} = ' + guide]) + df = global_store(value) + dff = df[df['crRNA'] == guide] + print('len index before', len(dff.index)) + sort_by.insert(0, {'column_id' : 'Mismatches', 'direction': 'asc'}) + sort_by.insert(1, {'column_id' : 'BulgeSize', 'direction': 'asc'}) + #sort_by.insert(2, {'column_id': 'CFD', 'direction':'desc'}) + for filter_part in filtering_expressions: + col_name, operator, filter_value = split_filter_part(filter_part) + + if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): + # these operators match pandas series operator method names + dff = dff.loc[getattr(dff[col_name], operator)(filter_value)].sort_values([col['column_id'] for col in sort_by], + ascending=[ + col['direction'] == 'asc' + for col in sort_by + ], + inplace=False) + elif operator == 'contains': + dff = dff.loc[dff[col_name].str.contains(filter_value)] + elif operator == 'datestartswith': + # this is a simplification of the front-end filtering logic, + # only works with complete fields in standard format + dff = dff.loc[dff[col_name].str.startswith(filter_value)] + + print('len index after', len(dff.index)) + #NOTE sort_by: [{'column_id': 'BulgeType', 'direction': 'asc'}, {'column_id': 'crRNA', 'direction': 'asc'}] + #sort_by.insert(0, {'column_id' : 'Mismatches', 'direction': 'asc'}) + #sort_by.insert(0, {'column_id' : 
'BulgeSize', 'direction': 'asc'}) + if len(sort_by): + dff = dff.sort_values( + [col['column_id'] for col in sort_by], + ascending=[ + col['direction'] == 'asc' + for col in sort_by + ], + inplace=False + ) + + #Check if results are not 0 + warning_no_res = '' + with open(job_directory + job_id + '.targets.txt') as t: + no_result = False + t.readline() + last_line = t.readline() + if (last_line is '' or last_line is '\n'): + no_result = True + + if (no_result): + warning_no_res = dbc.Alert("No results were found with the given parameters", color = "warning") + + + return dff.iloc[ + page_current*page_size:(page_current+ 1)*page_size + ].to_dict('records') + + +#For filtering +def split_filter_part(filter_part): + ''' + Preso dal sito di dash sul filtering datatables con python + ''' + for operator_type in operators: + for operator in operator_type: + if operator in filter_part: + name_part, value_part = filter_part.split(operator, 1) + name = name_part[name_part.find('{') + 1: name_part.rfind('}')] + + value_part = value_part.strip() + v0 = value_part[0] + if (v0 == value_part[-1] and v0 in ("'", '"', '`')): + value = value_part[1: -1].replace('\\' + v0, v0) + else: + try: + value = float(value_part) + except ValueError: + value = value_part + + # word operators need spaces after them in the filter string, + # but we don't want these later + return name, operator_type[0].strip(), value + + return [None] * 3 + + +#Read the uploaded file and converts into bit +def parse_contents(contents): + content_type, content_string = contents.split(',') + + decoded = base64.b64decode(content_string) + return decoded + + +#Generate column of images +@app.callback( + Output('all-images','children'), + + [Input('btn0', 'n_clicks_timestamp'), + Input('btn1', 'n_clicks_timestamp'), + Input('btn2', 'n_clicks_timestamp'), + Input('btn3', 'n_clicks_timestamp'), + Input('btn4', 'n_clicks_timestamp'), + Input('btn5', 'n_clicks_timestamp'), + Input('btn6', 'n_clicks_timestamp'), + 
Input('btn7', 'n_clicks_timestamp'), + Input('btn8', 'n_clicks_timestamp'), + Input('btn9', 'n_clicks_timestamp'), + #Input('btn10', 'n_clicks_timestamp'), + Input('btnAll','n_clicks_timestamp'), + Input('btn-summary-table','n_clicks_timestamp'), + Input('btn-summary-samples','n_clicks_timestamp'), + Input('btn-summary-position', 'n_clicks_timestamp'), + Input('general-profile-table', 'selected_cells')], + [#State('general-profile-table', 'selected_cells'), + State('general-profile-table', 'data'), + State('url', 'search'), + State('div-genome-type', 'children')] +) +def loadColumnImages(n0, n1, n2, n3, n4, n5, n6, n7, n8, n9, nAll, nSumTab, nSumSam, nSumPos, sel_cel, all_guides, search, genome_type): + ''' + Carica le immagini corrispondenti alla guida selezionata. Se non ho una cella selezionata non mostra niente. + La funzione carica le immagini dalla cartella Results/job_id usando una codifica, le mette all'interno di un link che + fa aprire l'immagine corrispondente presente nella cartella assets/Img/job_id. + TODO -> DONE aggiungere all'input tutti i bottoni (da 0 mismatches a 7 mismatches + bottone allImg), in modo che l'utente possa selezionare un bottone + e avere solo le immagini corrispondenti a quel valore di mismatch. Per fare ciò, si usa n_clicks_timestamp per vedere l'ultimo bottone cliccato + e prendere le immagini corrispondeti. 
Al momento se un bottone non è mai stato cliccato il suo valore è 0, al momento se tutti i bottoni sono + a zero faccio vedere tutte le immagini, ma in futuro potrebbe cambiare, magari far vedere solo le img con 1 mms + ''' + if sel_cel is None : + raise PreventUpdate + job_id = search.split('=')[-1] + job_directory = 'Results/' + job_id + '/' + guide = all_guides[int(sel_cel[0]['row'])]['Guide'] + + with open('Results/' + job_id + '/Params.txt') as p: + all_params = p.read() + mms = (next(s for s in all_params.split('\n') if 'Mismatches' in s)).split('\t')[-1] + genome_selected = (next(s for s in all_params.split('\n') if 'Genome_selected' in s)).split('\t')[-1] + max_bulges = (next(s for s in all_params.split('\n') if 'Max_bulges' in s)).split('\t')[-1] + + fl = [] + fl.append(html.Br()) + fl.append(html.H5('Focus on: ' + guide)) + + if not n0: + n0 = 0 + if not n1: + n1 = 0 + if not n2: + n2 = 0 + if not n3: + n3 = 0 + if not n4: + n4 = 0 + if not n5: + n5 = 0 + if not n6: + n6 = 0 + if not n7: + n7 = 0 + if not n8: + n8 = 0 + if not n9: + n9 = 0 + # if not n10: + # n10 = 0 + + if not nAll: + nAll = 0 + if not nSumTab: + nSumTab = 0 + if not nSumSam: + nSumSam = 0 + if not nSumPos: + nSumPos = 0 + btn_group = [] + btn_group.append(n0) + btn_group.append(n1) + btn_group.append(n2) + btn_group.append(n3) + btn_group.append(n4) + btn_group.append(n5) + btn_group.append(n6) + btn_group.append(n7) + btn_group.append(n8) + btn_group.append(n9) + #btn_group.append(n10) + btn_group.append(nAll) + btn_group.append(nSumTab) + btn_group.append(nSumSam) + btn_group.append(nSumPos) + show_image = True + if max(btn_group) == n0: + min_mm = 0 + max_mm = 1 + elif max(btn_group) == n1: + min_mm = 1 + max_mm = 2 + elif max(btn_group) == n2: + min_mm = 2 + max_mm = 3 + elif max(btn_group) == n3: + min_mm = 3 + max_mm = 4 + elif max(btn_group) == n4: + min_mm = 4 + max_mm = 5 + elif max(btn_group) == n5: + min_mm = 5 + max_mm = 6 + elif max(btn_group) == n6: + min_mm = 6 + max_mm = 
7 + elif max(btn_group) == n7: + min_mm = 7 + max_mm = 8 + elif max(btn_group) == n8: + min_mm = 8 + max_mm = 9 + elif max(btn_group) == n9: + min_mm = 9 + max_mm = 10 + elif max(btn_group) == nAll: + min_mm = 0 + max_mm = int(mms) + 1 + else: + show_image = False + if max(btn_group) == 0: + show_image = False + if show_image: + for i in range (min_mm, max_mm): #uso un for per comprendere anche il caso di showAllImages + radar_img = 'summary_single_guide_' + guide + '_' + str(i) + 'mm.png' + + barplot_img = 'summary_histogram_' + guide + '_' + str(i) + 'mm.png' + try: #NOTE serve per non generare errori se il barplot non è stato fatto + barplot_src = 'data:image/png;base64,{}'.format(base64.b64encode(open('Results/' + job_id + '/' + barplot_img, 'rb').read()).decode()) + barplot_href = 'assets/Img/' + job_id + '/' + barplot_img + except: + barplot_src = '' + barplot_href = '' + + try: + radar_src = 'data:image/png;base64,{}'.format(base64.b64encode(open('Results/' + job_id + '/' + radar_img, 'rb').read()).decode()) + except: + radar_src = '' + try: + radar_href = 'assets/Img/' + job_id + '/' + radar_img + except: + radar_href = '' + fl.append( + html.Div( + [ + dbc.Row( + [ + dbc.Col( + html.A( + html.Img(src = radar_src, id = 'barplot-img', width="75%", height="auto"), + target="_blank", + href = radar_href + ), + ), + dbc.Col( + html.A( + html.Img(src = barplot_src,id = 'barplot-img', width="75%", height="auto"), + target="_blank", + href = barplot_href + ) + ) + ], + ), + ] + ) + ) + fl.append(html.Br()) + fl.append(html.Br()) + else: + if max(btn_group) == nSumTab: + #Show Summary by Guide table + fl = [] + fl.append(html.Br()) + fl.append(html.H5('Focus on: ' + guide)) + #fl.append(html.P(['View all targets found with the selected guide ' , html.A('here', href = URL + '/result?job=' + job_id + '#' + guide, target = '_blank')])) #TODO ultime 3 righe uguali a sopra, sistemare + df = pd.read_pickle(job_directory + job_id + '.summary_by_guide.' 
+ guide + '.txt') + + df.drop( df[(df['Bulge Size'] == 0) & ((df['Bulge Type'] == 'DNA') | ((df['Bulge Type'] == 'RNA'))) | (df['Number of targets'] == 0) ].index, inplace = True) + more_info_col = [] + total_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + total_col.append(df['Bulge Size']) + df[''] = more_info_col + df['Total'] = df['Bulge Size'] + df['Mismatches'] + if genome_type == 'both': + df = df.sort_values(['Total', 'Targets created by SNPs'], ascending = [True, False]) + else: + df = df.sort_values('Total', ascending = True) + del df['Total'] + #print (df) + fl.append(html.Div( + generate_table(df, 'test_id_tab', guide, job_id ), style = {'text-align': 'center'} + ) + ) + return fl + elif max (btn_group) == nSumSam: + #Show Summary by Sample table + fl = [] + fl.append(html.Br()) + fl.append(html.H5('Focus on: ' + guide)) + if genome_type == 'both': + df = pd.read_csv(job_directory + job_id + '.summary_by_samples.' + guide + '.txt', sep = '\t', names = ['Sample', 'Number of targets', 'Targets created by SNPs', 'Population'], skiprows = 1) + df = df.sort_values(['Number of targets', 'Targets created by SNPs'], ascending = [False, True]) + else: + df = pd.read_csv(job_directory + job_id + '.summary_by_samples.' 
+ guide + '.txt', sep = '\t', names = ['Sample', 'Number of targets', 'Population'], skiprows = 1) + df = df.sort_values('Number of targets', ascending = False) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + + #TODO if genoma selezionato è hg19/38, con varianti, allora aggiungo queste pop (se per esempio seleziono mouse, devo mettere i ceppi) + super_populations = [{'label':i, 'value':i} for i in population_1000gp.keys()] + populations = [] + for k in population_1000gp.keys(): + for i in population_1000gp[k]: + populations.append({'label':i, 'value':i}) + fl.append( + html.Div + ( + [ + dbc.Row( + [ + dbc.Col(html.Div(dcc.Dropdown(options = super_populations, id = 'dropdown-superpopulation-sample', placeholder = 'Select a Super Population'))), + dbc.Col(html.Div(dcc.Dropdown(options = populations, id = 'dropdown-population-sample', placeholder = 'Select a Population'))), + dbc.Col(html.Div(html.Button('Filter', id = 'button-filter-population-sample'))) + ] + ), + ], + style = {'width':'50%'} + ) + ) + + fl.append(html.Div( + generate_table_samples(df, 'table-samples', 1, guide, job_id ), style = {'text-align': 'center'}, id = 'div-table-samples' + ) + ) + fl.append( + html.Div( + [ + html.Button('Prev', id = 'prev-page-sample'), + html.Button('Next', id = 'next-page-sample') + ], + style = {'text-align': 'center'} + ) + ) + fl.append(html.Div('1', id= 'div-current-page-table-samples')) + return fl + else: + #Show Summary by position table + + #Dropdown chromosomes + onlyfile = [f for f in listdir('Genomes/' + genome_selected) if (isfile(join('Genomes/' + genome_selected, f)) and (f.endswith('.fa') or f.endswith('.fasta')))] + onlyfile = [x[:x.rfind('.')] for x in onlyfile] #removed .fa for better visualization + chr_file = [] + chr_file_unset = [] + for chr_name in onlyfile: + chr_name = chr_name.replace('.enriched', '') + if '_' in chr_name: + chr_file_unset.append(chr_name) + # 
chr_file_unset.append({'label': chr_name, 'value' : chr_name}) + else: + chr_file.append(chr_name) + # chr_file.append({'label': chr_name, 'value' : chr_name}) + chr_file.sort(key = lambda s: [int(t) if t.isdigit() else t.lower() for t in re.split('(\d+)', s)]) + chr_file_unset.sort(key = lambda s: [int(t) if t.isdigit() else t.lower() for t in re.split('(\d+)', s)]) + chr_file += chr_file_unset + chr_file = [{'label': chr_name, 'value' : chr_name} for chr_name in chr_file] + start_global = time.time() + fl = [] + fl.append(html.Br()) + fl.append(html.H5('Focus on: ' + guide)) + # Colonne tabella: chr, pos, target migliore, min mm, min bulges, num target per ogni categoria di mm e bulge, show targets; ordine per total, poi mm e poi bulge + start_time = time.time() + df = pd.read_csv( job_directory + job_id + '.summary_by_position.' + guide +'.txt', sep = '\t', nrows = 100) + #df = pd.read_csv('0YT6LD1ECN.summary_position.CTAACAGTTGCTTTTATCACNNN.txt', sep = '\t') #TODO cancellare + df.rename(columns = {'#Chromosome':'Chromosome'}, inplace = True) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + + fl.append( + html.Div + ( + [ + dbc.Row( + [ + dbc.Col(html.Div(dcc.Dropdown(options = chr_file, id = 'dropdown-chr-table-position', placeholder = 'Select a chromosome'))), + dbc.Col(html.Div(dcc.Input(placeholder = 'Start Position', id = 'input-position-start'))), + dbc.Col(html.Div(dcc.Input(placeholder = 'End Position', id = 'input-position-end'))), + dbc.Col(html.Div(html.Button('Filter', id = 'button-filter-position'))) + ] + ), + ], + style = {'width':'50%'} + ) + ) + print('Position dataframe ready', time.time() - start_time) + #max_bulges = '2' #TODO remove + + start_time = time.time() + fl.append(html.Div( + generate_table_position(df, 'table-position', 1 , int(mms), int(max_bulges), guide, job_id ), style = {'text-align': 'center'}, id = 'div-table-position' + ) + ) + print('Position table 
ready', time.time() - start_time) + fl.append( + html.Div( + [ + html.Button('Prev', id = 'prev-page-position'), + html.Button('Next', id = 'next-page-position') + ], + style = {'text-align': 'center'} + ) + ) + print('Else poistion section finished', time.time() - start_global) + fl.append(html.Div('1', id= 'div-current-page-table-position')) + fl.append(html.Div(mms + '-' + max_bulges, id = 'div-mms-bulges-position', style = {'display':'none'})) + return fl + + return fl + + +def generate_table(dataframe, id_table, guide='', job_id='', max_rows=2600): + ''' + Per generare una html table. NOTE è diversa da una dash dataTable + ''' + return html.Table( + # Header + [html.Tr([html.Th(col) for col in dataframe.columns]) ] + + # Body + [html.Tr([ + html.Td(html.A(dataframe.iloc[i][col], href = 'result?job=' + job_id + '#' + guide +'new' + dataframe.iloc[i]['Bulge Type'] + str(dataframe.iloc[i]['Bulge Size']) + str(dataframe.iloc[i]['Mismatches']) , target = '_blank' )) if col == '' else html.Td(dataframe.iloc[i][col]) for col in dataframe.columns + ]) for i in range(min(len(dataframe), max_rows))], + style = {'display':'inline-block'}, + id = id_table + ) + +def generate_table_samples(dataframe, id_table, page ,guide='', job_id='', max_rows=10): + ''' + Per generare una html table. 
NOTE è diversa da una dash dataTable + ''' + rows_remaining = len(dataframe) - (page - 1) * max_rows + return html.Table( + # Header + [html.Tr([html.Th(col) for col in dataframe.columns]) ] + + # Body + [html.Tr([ + html.Td(html.A(dataframe.iloc[i + (page - 1)*max_rows][col], href = 'result?job=' + job_id + '#' + guide +'-Sample-' + dataframe.iloc[i]['Sample'] , target = '_blank' )) if col == '' else html.Td(dataframe.iloc[i + (page - 1)*max_rows][col]) for col in dataframe.columns + ]) for i in range(min(rows_remaining, max_rows))], + style = {'display':'inline-block'}, + id = id_table + ) + +# def generate_table_position(dataframe, id_table, page, guide = '', job_id = '', max_rows = 10): #NOTE v1 della tabella posizioni +# ''' +# Per generare una html table. NOTE è diversa da una dash dataTable +# ''' +# rows_remaining = len(dataframe) - (page - 1) * max_rows + +# return html.Table( +# # Header +# [html.Tr([html.Th(col) for col in dataframe.columns]) ] + +# # Body +# [html.Tr([ +# html.Td(html.A(dataframe.iloc[i + (page - 1)*max_rows][col], href = 'result?job=' + job_id + '#' + guide +'-Pos-' + str(dataframe.iloc[i + (page - 1)*max_rows]['Chromosome']) + '-' + str(dataframe.iloc[i + (page - 1)*max_rows]['Position']) , target = '_blank' )) if col == '' else html.Td(dataframe.iloc[i + (page - 1)*max_rows][col]) for col in dataframe.columns +# ]) for i in range(min(rows_remaining, max_rows))], +# style = {'display':'inline-block'}, +# id = id_table +# ) + +def generate_table_position(dataframe, id_table, page, mms, bulges, guide = '', job_id = '', max_rows = 10): #NOTE v2 della tabella posizioni #TODO modifica layout righe per allinearle + rows_remaining = len(dataframe) - (page - 1) * max_rows + header = [html.Tr([ + html.Th('Chromosome', rowSpan = '2', style = {'vertical-align':'middle', 'text-align':'center'}), + html.Th('Position', rowSpan = '2', style = {'vertical-align':'middle', 'text-align':'center'}), + html.Th('Best Target', rowSpan = '2', style = 
{'vertical-align':'middle', 'text-align':'center'}), + html.Th('Min Mismatch', rowSpan = '2', style = {'vertical-align':'middle', 'text-align':'center'}), + html.Th('Min Bulge', rowSpan = '2', style = {'vertical-align':'middle', 'text-align':'center'}), + html.Th('Bulge', rowSpan = '2', style = {'vertical-align':'middle', 'text-align':'center'}), + html.Th('Targets by mismatch value', colSpan = str(mms +1), style = {'vertical-align':'middle', 'text-align':'center'}), + html.Th('', rowSpan = '2'), + ]) + ] + mms_header = [] + for mm in range (mms +1): + mms_header.append(html.Th(str(mm) + ' MM', style = {'vertical-align':'middle', 'text-align':'center'})) + header.append(html.Tr(mms_header)) + + data = [] + for i in range(min(rows_remaining, max_rows)): + first_cells = [ + html.Td(dataframe.iloc[i + (page - 1)*max_rows]['Chromosome'], rowSpan = str(bulges +1), style = {'vertical-align':'middle', 'text-align':'center'}), + html.Td(dataframe.iloc[i + (page - 1)*max_rows]['Position'], rowSpan = str(bulges +1), style = {'vertical-align':'middle', 'text-align':'center'}), + html.Td(dataframe.iloc[i + (page - 1)*max_rows]['Best Target'], rowSpan = str(bulges+1), style = {'vertical-align':'middle', 'text-align':'center'}), + html.Td(dataframe.iloc[i + (page - 1)*max_rows]['Min Mismatch'], rowSpan = str(bulges+1), style = {'vertical-align':'middle', 'text-align':'center'}), + html.Td(dataframe.iloc[i + (page - 1)*max_rows]['Min Bulge'], rowSpan = str(bulges+1), style = {'vertical-align':'middle', 'text-align':'center'}), + html.Th('0 Bulge', style = {'vertical-align':'middle', 'text-align':'center', 'padding-left':'0'}) + ] + + mm_cells = [html.Td(dataframe.iloc[i + (page - 1)*max_rows][col], style = {'vertical-align':'middle', 'text-align':'center'}) for col in dataframe.columns[5:5+mms+1]] + data.append(html.Tr(first_cells + mm_cells + [html.Td( + html.A('Show Targets', href = 'result?job=' + job_id + '#' + guide +'-Pos-' + str(dataframe.iloc[i + (page - 
1)*max_rows]['Chromosome']) + '-' + str(dataframe.iloc[i + (page - 1)*max_rows]['Position']) , target = '_blank'), + rowSpan = str(bulges+1), style = {'vertical-align':'middle', 'text-align':'center'}) + ])) + for b in range (bulges): + data.append( + html.Tr( + [html.Th(str(b +1) + ' Bulge', style = {'vertical-align':'middle', 'text-align':'center'} )] + + + [html.Td(dataframe.iloc[i + (page - 1)*max_rows][col]) for col in dataframe.columns[5 + (b +1) *(mms +1) : 5 + (b +1) *(mms+1) + mms +1]] + ) + ) + + return html.Table(header + data, style = {'display':'inline-block'}, id = id_table) + + +#Callback to update the population tab based on superpopulation selected +@app.callback( + [Output('dropdown-population-sample', 'options'), + Output('dropdown-population-sample', 'value')], + [Input('dropdown-superpopulation-sample', 'value')] +) +def updatePopulationDrop(superpop): + if superpop is None or superpop is '': + raise PreventUpdate + return [{'label':i, 'value':i} for i in population_1000gp[superpop]], None #TODO adjust for other population file (eg mouse) + +#Callback to view next/prev page on sample table +@app.callback( + [Output('div-table-samples', 'children'), + Output('div-current-page-table-samples', 'children')], + [Input('button-filter-population-sample', 'n_clicks_timestamp'), + Input('prev-page-sample', 'n_clicks_timestamp'), + Input('next-page-sample', 'n_clicks_timestamp')], + [State('dropdown-superpopulation-sample', 'value'), + State('dropdown-population-sample', 'value'), + State('url', 'search'), + State('general-profile-table', 'selected_cells'), + State('general-profile-table', 'data'), + State('div-current-page-table-samples', 'children')] +) +def filterSampleTable(n, nPrev, nNext, sup_pop, pop, search, sel_cel, all_guides, current_page): + if sel_cel is None: + raise PreventUpdate + if nPrev is None and nNext is None and n is None: + raise PreventUpdate + + if nPrev is None: + nPrev = 0 + if nNext is None: + nNext = 0 + if n is None: + n = 
0 + current_page = int(current_page) + btn_sample_section = [] + btn_sample_section.append(n) + btn_sample_section.append(nPrev) + btn_sample_section.append(nNext) + job_id = search.split('=')[-1] + job_directory = 'Results/' + job_id + '/' + with open('Results/' + job_id + '/Params.txt') as p: + all_params = p.read() + genome_type_f = (next(s for s in all_params.split('\n') if 'Genome_selected' in s)).split('\t')[-1] + ref_comp = (next(s for s in all_params.split('\n') if 'Ref_comp' in s)).split('\t')[-1] + + genome_type = 'ref' + if '+' in genome_type_f: + genome_type = 'var' + if 'True' in ref_comp: + genome_type = 'both' + + guide = all_guides[int(sel_cel[0]['row'])]['Guide'] + if max(btn_sample_section) == n: #Last button pressed is filtering, return the first page of the filtered table + if (sup_pop is None or sup_pop is '') and (pop is None or pop is ''): #No filter value selected #TODO implementare che se cancello i filtri ritorno i valori originali + raise PreventUpdate + if genome_type == 'both': + df = pd.read_csv(job_directory + job_id + '.summary_by_samples.' + guide + '.txt', sep = '\t', names = ['Sample', 'Number of targets', 'Targets created by SNPs', 'Population'], skiprows = 1) + df = df.sort_values(['Number of targets', 'Targets created by SNPs'], ascending = [False, True]) + else: + df = pd.read_csv(job_directory + job_id + '.summary_by_samples.' 
+ guide + '.txt', sep = '\t', names = ['Sample', 'Number of targets', 'Population'], skiprows = 1) + df = df.sort_values('Number of targets', ascending = False) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + if pop is None or pop is '': + df.drop(df[(~(df['Population'].isin(population_1000gp[sup_pop])))].index , inplace = True) + else: + df.drop(df[(df['Population'] != pop)].index , inplace = True) + return generate_table_samples(df, 'table-samples', 1, guide, job_id ), 1 + else: + if max(btn_sample_section) == nNext: + current_page = current_page + 1 + if genome_type == 'both': + df = pd.read_csv(job_directory + job_id + '.summary_by_samples.' + guide + '.txt', sep = '\t', names = ['Sample', 'Number of targets', 'Targets created by SNPs', 'Population'], skiprows = 1) + df = df.sort_values(['Number of targets', 'Targets created by SNPs'], ascending = [False, True]) + else: + df = pd.read_csv(job_directory + job_id + '.summary_by_samples.' + guide + '.txt', sep = '\t', names = ['Sample', 'Number of targets', 'Population'], skiprows = 1) + df = df.sort_values('Number of targets', ascending = False) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + #Active filter + if pop or sup_pop: + if pop is None or pop is '': + df.drop(df[(~(df['Population'].isin(population_1000gp[sup_pop])))].index , inplace = True) + else: + df.drop(df[(df['Population'] != pop)].index , inplace = True) + + if ((current_page - 1) * 10) > len(df): + current_page = current_page -1 + if current_page < 1: + current_page = 1 + return generate_table_samples(df, 'table-samples', current_page, guide, job_id ), current_page + + else: #Go to previous page + current_page = current_page - 1 + if current_page < 1: + current_page = 1 + if genome_type == 'both': + df = pd.read_csv(job_directory + job_id + '.summary_by_samples.' 
+ guide + '.txt', sep = '\t', names = ['Sample', 'Number of targets', 'Targets created by SNPs', 'Population'], skiprows = 1) + df = df.sort_values(['Number of targets', 'Targets created by SNPs'], ascending = [False, True]) + else: + df = pd.read_csv(job_directory + job_id + '.summary_by_samples.' + guide + '.txt', sep = '\t', names = ['Sample', 'Number of targets', 'Population'], skiprows = 1) + df = df.sort_values('Number of targets', ascending = False) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + if pop or sup_pop: + if pop is None or pop is '': + df.drop(df[(~(df['Population'].isin(population_1000gp[sup_pop])))].index , inplace = True) + else: + df.drop(df[(df['Population'] != pop)].index , inplace = True) + return generate_table_samples(df, 'table-samples', current_page, guide, job_id ), current_page + raise PreventUpdate + + + +#Callback to filter chr from Summary by Position table, and to show next/prev page +@app.callback( + [Output('div-table-position', 'children'), + Output('div-current-page-table-position', 'children')], + [Input('button-filter-position', 'n_clicks_timestamp'), + Input('prev-page-position','n_clicks_timestamp'), + Input('next-page-position', 'n_clicks_timestamp')], + [State('dropdown-chr-table-position', 'value'), + State('input-position-start', 'value'), + State('input-position-end','value'), + State('url', 'search'), + State('general-profile-table', 'selected_cells'), + State('general-profile-table', 'data'), + State('div-current-page-table-position', 'children'), + State('div-mms-bulges-position', 'children')] +)#TODO test filter position con risultati filtrati +#BUG se metto chr1 ma non clicco filtering e poi vado in next/prev il filtering è comunque applicato +def filterPositionTable(n, nPrev, nNext, chr, pos_begin, pos_end, search, sel_cel, all_guides, current_page, mms_bulge): + if sel_cel is None: + raise PreventUpdate + if nPrev is None and nNext is None 
and n is None: + raise PreventUpdate + + if nPrev is None: + nPrev = 0 + if nNext is None: + nNext = 0 + if n is None: + n = 0 + current_page = int(current_page) + mms = int(mms_bulge.split('-')[0]) + max_bulges = int(mms_bulge.split('-')[1]) + btn_position_section = [] + btn_position_section.append(n) + btn_position_section.append(nPrev) + btn_position_section.append(nNext) + job_id = search.split('=')[-1] + job_directory = 'Results/' + job_id + '/' + guide = all_guides[int(sel_cel[0]['row'])]['Guide'] + if max(btn_position_section) == n: #Last button pressed is filtering, return the first page of the filtered table + if pos_begin is None or pos_begin is '': + pos_begin = 0 + if pos_end is '': + pos_end = None + if pos_end: + if int(pos_end) < int(pos_begin): + pos_end = None + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' + guide +'.txt', sep = '\t') #TODO cambiare nome file con quello giusto (job_id.guida.tab_position.txt) + + df.rename(columns = {'#Chromosome':'Chromosome'}, inplace = True) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + if chr is None or chr is '': + return generate_table_position(df, 'table-position', 1, mms, max_bulges,guide, job_id ), 1 + if pos_end is None: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) )].index , inplace = True) + else: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) | (df['Position'] > int(pos_end)))].index , inplace = True) + return generate_table_position(df, 'table-position', 1, mms, max_bulges,guide, job_id ), 1 + else: + + if max(btn_position_section) == nNext: + current_page = current_page + 1 + if chr: + if pos_begin is None or pos_begin is '': + pos_begin = 0 + if pos_end is '': + pos_end = None + if pos_end: + if int(pos_end) < int(pos_begin): + pos_end = None + df = pd.read_csv(job_directory + job_id + 
'.summary_by_position.' + guide +'.txt', sep = '\t') #TODO cambiare nome file con quello giusto (job_id.guida.tab_position.txt) + #df = pd.read_csv('0YT6LD1ECN.summary_position.CTAACAGTTGCTTTTATCACNNN.txt', sep = '\t') #TODO cancellare + else: + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' + guide +'.txt', sep = '\t', nrows = current_page * 10) #TODO cambiare nome file con quello giusto (job_id.guida.tab_position.txt) + #df = pd.read_csv('0YT6LD1ECN.summary_position.CTAACAGTTGCTTTTATCACNNN.txt', sep = '\t', nrows = current_page * 10) #TODO cancellare + df.rename(columns = {'#Chromosome':'Chromosome'}, inplace = True) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + if chr: + if pos_end is None: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) )].index , inplace = True) + else: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) | (df['Position'] > int(pos_end)))].index , inplace = True) + if ((current_page - 1) * 10) > len(df): + current_page = current_page -1 + if current_page < 1: + current_page = 1 + return generate_table_position(df, 'table-position', current_page, mms, max_bulges,guide, job_id ), current_page + else: #Go to previous page + current_page = current_page - 1 + if current_page < 1: + current_page = 1 + + if chr: + if pos_begin is None or pos_begin is '': + pos_begin = 0 + if pos_end is '': + pos_end = None + if pos_end: + if int(pos_end) < int(pos_begin): + pos_end = None + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' + guide +'.txt', sep = '\t') #TODO cambiare nome file con quello giusto (job_id.guida.tab_position.txt) + #df = pd.read_csv('0YT6LD1ECN.summary_position.CTAACAGTTGCTTTTATCACNNN.txt', sep = '\t') #TODO cancellare + else: + df = pd.read_csv(job_directory + job_id + '.summary_by_position.' 
+ guide +'.txt', sep = '\t', nrows = current_page * 10) #TODO cambiare nome file con quello giusto (job_id.guida.tab_position.txt) + #df = pd.read_csv('0YT6LD1ECN.summary_position.CTAACAGTTGCTTTTATCACNNN.txt', sep = '\t',nrows = current_page * 10) #TODO cancellare + df.rename(columns = {'#Chromosome':'Chromosome'}, inplace = True) + more_info_col = [] + for i in range(df.shape[0]): + more_info_col.append('Show Targets') + df[''] = more_info_col + if chr: + if pos_end is None: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) )].index , inplace = True) + else: + df.drop(df[(df['Chromosome'] != chr) | ((df['Chromosome'] == chr) & (df['Position'] < int(pos_begin)) | (df['Position'] > int(pos_end)))].index , inplace = True) + return generate_table_position(df, 'table-position', current_page, mms, max_bulges,guide, job_id ), current_page + +#FOR BUTTON IN TABLE +# element.style { +# background: none; +# border: none; +# margin: 0; +# padding: 0; +# cursor: pointer; +# font-family: monospace; +# font-size: large; +# font-weight: normal; +# } + + + + +#If the input guides are different len, select the ones with same length as the first +def selectSameLenGuides(list_guides): + ''' + Se l'utente mette guide di lunghezza diversa, la funzione prende la lunghezza della prima guida e salva solo le guide con quella lunghezza + ''' + selected_length = len(list_guides.split('\n')[0]) + same_len_guides_list = [] + for guide in list_guides.split('\n'): + if len(guide) == selected_length: + same_len_guides_list.append(guide) + same_len_guides = '\n'.join(same_len_guides_list).strip() + return same_len_guides + +def resultPage(job_id): + ''' + La funzione ritorna il layout della pagina risultati (tabella delle guide + eventuali immagini). Nella tabella delle guide + carico il profile ottenuto dalla ricerca. Carica inoltre l'ACFD, che è il cfd score aggregato per tutti i risultati di una singola guida. 
+ Crea poi 10 bottoni: un numero pari a mismatches + 2 che sono visibili, il resto con style = {'display':'none'}, così ho sempre il numero + esatto di bottoni per mismatches in base ai mms dati in input nella ricerca (serve a risolvere problemi con le callback che hanno input + da elementi non creati. In questo caso io creo tutti i possibili bottoni ma ne rendo visibili/disponibili solo il numero corretto in base + ai mms). + TODO usare className per i bottoni e modificare lo stile, oppure vedere se è possibile usare un buttongroup. Vedere quale è meglio. + TODO al momento la tabella delle guide è ordinata per acfd, rendere possibile anche l'ordinamento e il filtering da parte dell'utente, + usare il codice presente in dash datatable filtering con python. + ''' + value = job_id + job_directory = 'Results/' + job_id + '/' + warning_message = [] + if (not isdir(job_directory)): + return html.Div(dbc.Alert("The selected result does not exist", color = "danger")) + + #Load mismatches + with open('Results/' + value + '/Params.txt') as p: + all_params = p.read() + mms = (next(s for s in all_params.split('\n') if 'Mismatches' in s)).split('\t')[-1] + genome_type_f = (next(s for s in all_params.split('\n') if 'Genome_selected' in s)).split('\t')[-1] + ref_comp = (next(s for s in all_params.split('\n') if 'Ref_comp' in s)).split('\t')[-1] + + genome_type = 'ref' + if '+' in genome_type_f: + genome_type = 'var' + if 'True' in ref_comp: + genome_type = 'both' + mms = int(mms[0]) + mms_values = [{'label':i, 'value':i} for i in range(mms + 1) ] + + col_profile_general = ['Total On-Targets', 'Total Off-Targets'] + for i in range(mms): + col_profile_general.append(str(i+1) + ' Mismatches') + col_type = ['numeric' for i in col_profile_general] + + + #Load profile + try: + profile = pd.read_csv('Results/' + value + '/' + value + '.profile_complete.xls') #NOTE profile_complete has ',' as separator + if len(profile.columns) == 1: + profile = pd.read_csv('Results/' + value + '/' + 
value + '.profile_complete.xls', sep='\t') + except: + profile = pd.read_csv('Results/' + value + '/' + value + '.profile.xls', sep = '\t') #NOTE profile has \t as separator or ',' + if len(profile.columns) == 1: + profile = pd.read_csv('Results/' + value + '/' + value + '.profile.xls') + #load acfd for each guide + with open('Results/' + value + '/acfd.txt') as a: + all_scores = a.read().strip().split('\n') + + if 'NO SCORES' not in all_scores: + all_scores.sort() + acfd = [float(a.split('\t')[1]) for a in all_scores] + doench = [int(a.split('\t')[2]) for a in all_scores] + acfd = [int(round((100/(100 + x))*100)) for x in acfd] + columns_profile_table = [{'name':'Guide', 'id' : 'Guide', 'type':'text'}, {'name':'CFD', 'id': 'CFD', 'type':'numeric'}, {'name':'Doench 2016', 'id': 'Doench 2016', 'type':'numeric'} ,{'name':'Total On-Targets', 'id' : 'Total On-Targets', 'type':'numeric'}, {'name':'Total Off-targets', 'id' : 'Total Off-Targets', 'type':'numeric'}] + else: + columns_profile_table = [{'name':'Guide', 'id' : 'Guide', 'type':'text'}, {'name':'Total On-Targets', 'id' : 'Total On-Targets', 'type':'numeric'}, {'name':'Total Off-targets', 'id' : 'Total Off-Targets', 'type':'numeric'}] + + keep_column = ['GUIDE', 'ONT', 'OFFT'] + for i in range (mms): + #columns_profile_table.append({'name': str(i+1) + ' Mismatches', 'id':str(i+1) + ' Mismatches', 'type':'numeric'}) + keep_column.append(str(i+1) + 'MM') + + profile = profile[keep_column] + rename_columns = {'GUIDE':'Guide',"ONT":'Total On-Targets', 'OFFT':'Total Off-Targets'} + col_targetfor = 'Targets for ' + for i in range(mms): + rename_columns[str(i+1) + 'MM'] = str(i+1) + ' Mismatches' + col_targetfor = col_targetfor + str(i) + '-' + col_targetfor = col_targetfor + str(mms) + col_targetfor = col_targetfor + ' mismatches' + columns_profile_table.append({'name': col_targetfor, 'id' : 'col_targetfor', 'type':'text'}) + profile.rename(columns = rename_columns, inplace = True) #Now profile is Guide, Total 
On-targets, ... + col_to_add = [] + tmp_col_to_add = [] + for row in profile.itertuples(): + for i in range (mms+1): + if i == 0: + tmp_col_to_add.append(str(profile['Total On-Targets'][row[0]])) + else: + tmp_col_to_add.append(str(profile[str(i) + ' Mismatches'][row[0]])) + col_to_add.append(' - '.join(tmp_col_to_add)) + tmp_col_to_add = [] + + profile['col_targetfor'] = col_to_add + + if genome_type == 'var': + columns_profile_table.append({'name':'Targets in samples', 'id':'Targets in samples', 'type':'numeric'}) + column_sample_total = [] + for guide in profile.Guide.unique(): + with open ('Results/' + value + '/' + value + '.summary_by_samples.' + guide + '.txt', 'r') as sample_list: + sample_total = sample_list.readline().strip().split('\t')[1] + column_sample_total.append(sample_total) + profile['Targets in samples'] = column_sample_total + + + final_list = [] + + + final_list.append( + html.H3('Result Summary') + ) + + + + profile = profile.sort_values('Guide') + profile['CFD'] = acfd + profile['Doench 2016'] = doench + + profile = profile.sort_values(['CFD', 'Doench 2016'], ascending = [False, False]) + final_list.append(html.P('Select a guide by clicking on a row to view more information')) + final_list.append( + html.Div( + dash_table.DataTable( + id = 'general-profile-table', + page_size=PAGE_SIZE, + columns = columns_profile_table, + data = profile.to_dict('records'), + selected_cells = [{'row':0, 'column':0}], + css= [{ 'selector': 'td.cell--selected, td.focused', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;' }, { 'selector': 'td.cell--selected *, td.focused *', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;'}] + ) + ,id = 'div-general-profile-table') + ) + + final_list.append(html.Br()) + + #Create 10 buttons hidden and show when guide is selected, when button is pressed, show image with corresponding mm + for i in range (10): + if (i <= mms): + final_list.append( + html.Button(str(i) + ' mm',id = 'btn' + str(i)), + ) + 
else: + final_list.append( + html.Button(str(i) + ' mm',id = 'btn' + str(i), style = {'display':'none'}), + ) + final_list.append( + html.Button('Show all',id = 'btnAll'), + ) + final_list.append( + html.Button('Summary by Guide', id = 'btn-summary-table') + ) + style_samples = {} + if genome_type == 'ref': + style_samples = {'display':'none'} + final_list.append( + html.Button('Summary by Samples', id = 'btn-summary-samples', style = style_samples) + ) + final_list.append( + html.Button('Summary by Position', id = 'btn-summary-position') + ) + + final_list.append( + html.Div(id = 'all-images') + ) + + final_list.append(html.Div(genome_type, style = {'display':'none'}, id = 'div-genome-type')) + result_page = html.Div(final_list, style = {'margin':'1%'}) + return result_page + + +def guidePage(job_id, guide): + ''' + Crea il layuot della pagina che contiene tutti i targets della guida selezionata + ''' + value = job_id + final_list = [] + final_list.append(html.P('List of Targets found for the selected guide')) + col_list = ['BulgeType', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Direction', 'Mismatches', 'BulgeSize', 'CFD', 'Doench2016'] + col_type = ['text','text','text','text','numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric'] + cols = [{"name": i, "id": i, 'type':t} for i,t in zip(col_list, col_type)] + job_directory = 'Results/' + job_id + '/' + #Load mismatches + with open('Results/' + value + '/Params.txt') as p: + mms = (next(s for s in p.read().split('\n') if 'Mismatches' in s)).split('\t')[-1] + + mms = int(mms[0]) + mms_values = [{'label':i, 'value':i} for i in range(mms + 1) ] + global_store(job_id) #TODO controllare se carica ogni volta o solo la prima + #NOTE the filtering is done automatically when the page is loaded due to the function update_table since it's triggered when the table is created, putting page_current, sort_by etc + #at initial values + + # df = global_store(job_id) + # dff = df + # filtering_expressions = 
['{crRNA} = ' + guide] + # for filter_part in filtering_expressions: + # col_name, operator, filter_value = split_filter_part(filter_part) + + # if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): + # # these operators match pandas series operator method names + # dff = dff.loc[getattr(dff[col_name], operator)(filter_value)] + # elif operator == 'contains': + # dff = dff.loc[dff[col_name].str.contains(filter_value)] + # elif operator == 'datestartswith': + # # this is a simplification of the front-end filtering logic, + # # only works with complete fields in standard format + # dff = dff.loc[dff[col_name].str.startswith(filter_value)] + + final_list.append( + html.Div( + dash_table.DataTable( + id='result-table', + columns=cols, + #data = dff.to_dict('records'), + virtualization = True, + fixed_rows={ 'headers': True, 'data': 0 }, + style_cell={'width': '150px'}, + page_current=0, + page_size=PAGE_SIZE, + page_action='custom', + sort_action='custom', + sort_mode='multi', + sort_by=[], + filter_action='custom', + filter_query='', + style_table={ + 'height': '300px', + #'overflowY': 'scroll', + }, + # style_data_conditional=[{ + # "if": {'column_id':'BulgeType', 'filter_query' : 'BulgeType eq "RNA"'}, #{'filter_query' : 'BulgeType eq "RNA"'}, + # "backgroundColor": "lightblue", + # 'color': 'white' + # }], + ), + id = 'div-result-table', + ) + ) + final_list.append(html.Br()) + + return html.Div(final_list, style = {'margin':'1%'}) + +def guidePagev2(job_id, guide): + ''' + Crea il layout della pagina che contiene tutti i targets della guida selezionata, aggiornato per includere colonne PAM-creation, disruption, samples, etc + Il file da caricare è il total (uniq + semicommon) + ''' + value = job_id + final_list = [] + final_list.append(html.P('List of Targets found for the selected guide')) + col_list = ['BulgeType', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Direction', 'Mismatches', 'BulgeSize', 'Total', 'Min_mismatches', 'Max_mismatches', 'PAM disruption', 'PAM 
creation', 'Variant unique', 'Samples'] + col_type = ['text','text','text','text','numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'text', 'text', 'text', 'text', 'text'] + cols = [{"name": i, "id": i, 'type':t} for i,t in zip(col_list, col_type)] + job_directory = 'Results/' + job_id + '/' + #Load mismatches + with open('Results/' + value + '/Params.txt') as p: + mms = (next(s for s in p.read().split('\n') if 'Mismatches' in s)).split('\t')[-1] + + mms = int(mms[0]) + mms_values = [{'label':i, 'value':i} for i in range(mms + 1) ] + global_store(job_id) #TODO controllare se carica ogni volta o solo la prima + #NOTE the filtering is done automatically when the page is loaded due to the function update_table since it's triggered when the table is created, putting page_current, sort_by etc + #at initial values + + # df = global_store(job_id) + # dff = df + # filtering_expressions = ['{crRNA} = ' + guide] + # for filter_part in filtering_expressions: + # col_name, operator, filter_value = split_filter_part(filter_part) + + # if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): + # # these operators match pandas series operator method names + # dff = dff.loc[getattr(dff[col_name], operator)(filter_value)] + # elif operator == 'contains': + # dff = dff.loc[dff[col_name].str.contains(filter_value)] + # elif operator == 'datestartswith': + # # this is a simplification of the front-end filtering logic, + # # only works with complete fields in standard format + # dff = dff.loc[dff[col_name].str.startswith(filter_value)] + + final_list.append( + html.Div( + dash_table.DataTable( + id='result-table', + columns=cols, + #data = dff.to_dict('records'), + virtualization = True, + fixed_rows={ 'headers': True, 'data': 0 }, + style_cell={'width': '150px'}, + page_current=0, + page_size=PAGE_SIZE, + page_action='custom', + sort_action='custom', + sort_mode='multi', + sort_by=[], + filter_action='custom', + filter_query='', + style_table={ + 'height': '300px', + 
#'overflowY': 'scroll', + }, + # style_data_conditional=[ + # { + # 'if': { + # 'filter_query': '{Variant unique} eq y', + # 'column_id' :'BulgeType' + # }, + # #'border-left': '5px solid rgba(255, 26, 26, 0.9)', + # 'background-color':'red' + + + # }, + # { + # 'if': { + # 'filter_query': '{Total} eq 3', #TODO change to {Variant unique} eq + # 'column_id' :'BulgeType' + # }, + # 'border-left': '5px solid rgba(26, 26, 255, 0.9)', + + # } + + # ] + + ), + id = 'div-result-table', + ) + ) + final_list.append(html.Br()) + + return html.Div(final_list, style = {'margin':'1%'}) + +@cache.memoize() +def global_store_subset(value, bulge_t, bulge_s, mms, guide): + ''' + Caching dei file targets per una miglior performance di visualizzazione + ''' + if value is None: + return '' + + df = pd.read_csv( 'Results/' + value + '/' + value + '.' + bulge_t + bulge_s + mms + '.' + guide +'.txt', sep = '\t', header = None) + #df.rename(columns = {"#Bulge type":'Bulge Type', "#Bulge_type":'Bulge Type', 'Bulge_Size':'Bulge Size'}, inplace = True) + return df + + +def guidePagev3(job_id, hash): + guide = hash[:hash.find('new')] + mms = hash[-1:] + bulge_s = hash[-2:-1] + if 'DNA' in hash: + bulge_t = 'DNA' + elif 'RNA' in hash: + bulge_t = 'RNA' + else: + bulge_t = 'X' + + value = job_id + + with open('Results/' + value + '/Params.txt') as p: + all_params = p.read() + genome_type_f = (next(s for s in all_params.split('\n') if 'Genome_selected' in s)).split('\t')[-1] + ref_comp = (next(s for s in all_params.split('\n') if 'Ref_comp' in s)).split('\t')[-1] + + genome_type = 'ref' + if '+' in genome_type_f: + genome_type = 'var' + if 'True' in ref_comp: + genome_type = 'both' + final_list = [] + final_list.append(html.H3('Selected Guide: ' + guide)) + final_list.append(html.P('List of Targets found for the selected guide')) + if genome_type == 'ref': + col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Cluster Position' ,'Direction', 'Mismatches', 'Bulge Size', 'Total'] + 
col_type = ['text','text','text','text','numeric', 'numeric','text','numeric', 'numeric', 'numeric'] + file_to_grep = 'targets.cluster' + elif genome_type == 'var': + col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Cluster Position' ,'Direction', 'Mismatches', 'Bulge Size', 'Total', 'Min_mismatches', 'Max_mismatches', 'PAM disruption'] + col_type = ['text','text','text','text','numeric', 'numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'text'] + file_to_grep = 'targets.cluster.minmaxdisr' + else: + col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Direction', 'Mismatches', 'Bulge Size', 'Total', 'Min_mismatches', 'Max_mismatches', 'PAM disruption', 'PAM creation', 'Variant unique', 'Samples'] + col_type = ['text','text','text','text','numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'text', 'text', 'text', 'text', 'text'] + cols = [{"name": i, "id": i, 'type':t, 'hideable':True} for i,t in zip(col_list, col_type)] + job_directory = 'Results/' + job_id + '/' + + start = time.time() + subprocess.call(['PostProcess/./grep_specific_targets.sh ' + bulge_t + ' ' + bulge_s + ' ' + mms + ' ' + guide[0] + ' ' + guide[1] + ' ' + job_id + ' ' + guide + ' ' + file_to_grep], shell = True) #TODO migliorare + global_store_subset(job_id, bulge_t, bulge_s, mms,guide) + #subset_targets = pd.read_csv('example_todo_delete.txt', sep = '\t') + #data_dict = subset_targets.to_dict('records') + print('Grep e load:', time.time() - start) + final_list.append( + html.Div( + dash_table.DataTable( + id='table-subset-target', + columns=cols, + #data = subset_targets.to_dict('records'), + virtualization = True, + fixed_rows={ 'headers': True, 'data': 0 }, + #fixed_columns = {'headers': True, 'data':1}, + style_cell={'width': '150px'}, + page_current=0, + page_size=PAGE_SIZE, + page_action='custom', + sort_action='custom', + sort_mode='multi', + sort_by=[], + filter_action='custom', + filter_query='', + 
style_table={ + 'height': '600px' + #'overflowY': 'scroll', + }, + style_cell_conditional=[ + { + 'if': {'column_id': 'Samples'}, + 'textAlign': 'left' + } + ], + css= [{ 'selector': 'td.cell--selected, td.focused', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;' }, { 'selector': 'td.cell--selected *, td.focused *', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;'}], + export_format = 'csv' + ), + id = 'div-result-table', + ) + ) + final_list.append(html.Br()) + + return html.Div(final_list, style = {'margin':'1%'}) + +#Send the data when next or prev button is clicked on the result table +@app.callback( + [Output('table-subset-target', 'data'), + Output('table-subset-target', 'style_data_conditional')], + [Input('table-subset-target', "page_current"), + Input('table-subset-target', "page_size"), + Input('table-subset-target', "sort_by"), + Input('table-subset-target', 'filter_query')], + [State('url', 'search'), + State('url', 'hash')] +) +def update_table_subset(page_current, page_size, sort_by, filter, search, hash_guide): + ''' + La funzione ritorna uno split dei risultati in base ad un filtering o a un sort da parte dell'utente. Inoltre aggiorna i risultati + visualizzati quando il bottone next page / prev page è cliccato. (Codice preso dalla pagina dash datatable sul sorting con python) + Inoltre carica i file targets, o scores se presente, e lo trasforma in un dataframe, cambiando il nome delle colonne per farle corrispondere + all'id delle colonne della tabella nella pagina. 
+ Se non ci sono targets ritorna un avviso di errore + ''' + job_id = search.split('=')[-1] + job_directory = 'Results/' + job_id + '/' + #guide = hash_guide.split('#')[1] + value = job_id + if search is None: + raise PreventUpdate + filtering_expressions = filter.split(' && ') + #filtering_expressions.append(['{crRNA} = ' + guide]) + guide = hash_guide[1:hash_guide.find('new')] + mms = hash_guide[-1:] + bulge_s = hash_guide[-2:-1] + if 'DNA' in hash_guide: + bulge_t = 'DNA' + elif 'RNA' in hash_guide: + bulge_t = 'RNA' + else: + bulge_t = 'X' + df = global_store_subset(value, bulge_t, bulge_s, mms, guide) + dff = df + dff.rename(columns ={0:'Bulge Type', 1:'crRNA', 2:'DNA', 3:'Chromosome', 4:'Position', 5:'Cluster Position', 6:'Direction', + 7:'Mismatches', 8:'Bulge Size', 9:'Total', 10:'Min_mismatches', 11:'Max_mismatches', 12: 'PAM disruption', 13:'PAM creation', 14 : 'Variant unique', 15:'Samples'} , inplace = True) + + # sort_by.insert(0, {'column_id' : 'Mismatches', 'direction': 'asc'}) + # sort_by.insert(1, {'column_id' : 'Bulge Size', 'direction': 'asc'}) + #sort_by.insert(2, {'column_id': 'CFD', 'direction':'desc'}) + for filter_part in filtering_expressions: + col_name, operator, filter_value = split_filter_part(filter_part) + + if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): + # these operators match pandas series operator method names + dff = dff.loc[getattr(dff[col_name], operator)(filter_value)].sort_values([col['column_id'] for col in sort_by], + ascending=[ + col['direction'] == 'asc' + for col in sort_by + ], + inplace=False) + elif operator == 'contains': + dff = dff.loc[dff[col_name].str.contains(filter_value)] + elif operator == 'datestartswith': + # this is a simplification of the front-end filtering logic, + # only works with complete fields in standard format + dff = dff.loc[dff[col_name].str.startswith(filter_value)] + + #NOTE sort_by: [{'column_id': 'BulgeType', 'direction': 'asc'}, {'column_id': 'crRNA', 'direction': 'asc'}] + 
#sort_by.insert(0, {'column_id' : 'Mismatches', 'direction': 'asc'}) + #sort_by.insert(0, {'column_id' : 'BulgeSize', 'direction': 'asc'}) + if len(sort_by): + dff = dff.sort_values( + [col['column_id'] for col in sort_by], + ascending=[ + col['direction'] == 'asc' + for col in sort_by + ], + inplace=False + ) + + + cells_style = [ + { + 'if': { + 'filter_query': '{Variant unique} eq y', + #'filter_query': '{Direction} eq +', + #'column_id' :'Bulge Type' + }, + #'border-left': '5px solid rgba(255, 26, 26, 0.9)', + 'background-color':'rgba(255, 0, 0,0.15)'#'rgb(255, 102, 102)' + + }, + # { + # 'if': { + # 'filter_query': '{Variant unique} eq n', + # 'column_id' :'Bulge Type' + # }, + # 'border-left': '5px solid rgba(26, 26, 255, 0.9)', + + # } + + ] + return dff.iloc[ + page_current*page_size:(page_current+ 1)*page_size + ].to_dict('records'), cells_style + + +#Return the targets found for the selected sample +def samplePage(job_id, hash): + guide = hash[:hash.find('-Sample-')] + sample = hash[hash.rfind('-') + 1:] + with open('Results/' + job_id + '/Params.txt') as p: + all_params = p.read() + genome_type_f = (next(s for s in all_params.split('\n') if 'Genome_selected' in s)).split('\t')[-1] + ref_comp = (next(s for s in all_params.split('\n') if 'Ref_comp' in s)).split('\t')[-1] + + genome_type = 'ref' + if '+' in genome_type_f: + genome_type = 'var' + if 'True' in ref_comp: + genome_type = 'both' + + final_list = [] + final_list.append( + #html.P('List of Targets found for the selected Sample - ' + sample + ' - and guide - ' + guide + ' -') + html.H3('Selected Sample: ' + sample) + ) + final_list.append(html.P('List of Targets found for the selected sample')) + # col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Direction', 'Mismatches', 'Bulge Size', 'Total', 'Min_mismatches', 'Max_mismatches', 'PAM disruption', 'PAM creation', 'Variant unique', 'Samples'] + + if genome_type == 'var': + col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 
'Position', 'Cluster Position' ,'Direction', 'Mismatches', 'Bulge Size', 'Total', 'Min_mismatches', 'Max_mismatches', 'PAM disruption', 'Samples'] + col_type = ['text','text','text','text','numeric', 'numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'text'] + else: + col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Cluster Position','Direction', 'Mismatches', 'Bulge Size', 'Total', 'Min_mismatches', 'Max_mismatches', 'PAM disruption', 'PAM creation', 'Variant unique', 'Samples'] + col_type = ['text','text','text','text','numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'text', 'text', 'text', 'text'] + + subprocess.call(['grep \'' + sample + '\' ' + 'Results/'+ job_id + '/' + job_id + '.top_1.samples.txt > Results/'+ job_id + '/' + job_id + '.' + sample + '.' + guide + '.txt' ], shell = True) + df = pd.read_csv('Results/'+ job_id + '/' + job_id + '.' + sample + '.' + guide + '.txt', sep = '\t', names = col_list) + df.drop(df.columns[[-1,]], axis=1, inplace=True) #NOTE comment to show the sample column (maybe not informative in this view) + del col_list[-1] #NOTE comment to show the sample column (maybe not informative in this view) + cols = [{"name": i, "id": i, 'type':t, 'hideable':True} for i,t in zip(col_list, col_type)] + + final_list.append( #TODO add margin bottom 1rem to toggle button and prev-next buttons + html.Div( + dash_table.DataTable( + id='table-sample-target', + columns=cols, + data = df.to_dict('records'), + virtualization = True, + fixed_rows={ 'headers': True, 'data': 0 }, + #fixed_columns = {'headers': True, 'data':1}, + style_cell={'width': '150px'}, + page_current=0, + page_size=PAGE_SIZE, + page_action='custom', + sort_action='custom', + sort_mode='multi', + sort_by=[], + filter_action='custom', + filter_query='', + style_table={ + 'height': '600px' + #'overflowY': 'scroll', + }, + style_data_conditional=[ + { + 'if': { + 'filter_query': '{Variant unique} eq y', + #'column_id' 
:'{#Bulge type}', + #'column_id' :'{Total}' + }, + #'border-left': '5px solid rgba(255, 26, 26, 0.9)', + 'background-color':'rgba(255, 0, 0,0.15)'#'rgb(255, 102, 102)' + + } + ], + css= [{ 'selector': 'td.cell--selected, td.focused', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;' }, { 'selector': 'td.cell--selected *, td.focused *', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;'}], + # css= [{ 'selector': 'td.row--selected, td.focused', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;' }, { 'selector': 'td.row--selected *, td.focused *', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;'}], + + ), + id = 'div-result-table', + ) + ) + return html.Div(final_list, style = {'margin':'1%'}) + + +#Return the targets for the selected cluster +def clusterPage(job_id, hash): + guide = hash[:hash.find('-Pos-')] + chr_pos = hash[hash.find('-Pos-') + 5:] + chromosome = chr_pos.split('-')[0] + position = chr_pos.split('-')[1] + with open('Results/' + job_id + '/Params.txt') as p: + all_params = p.read() + genome_type_f = (next(s for s in all_params.split('\n') if 'Genome_selected' in s)).split('\t')[-1] + ref_comp = (next(s for s in all_params.split('\n') if 'Ref_comp' in s)).split('\t')[-1] + + genome_type = 'ref' + if '+' in genome_type_f: + genome_type = 'var' + if 'True' in ref_comp: + genome_type = 'both' + final_list = [] + final_list.append( + html.H3('Selected Position: ' + chromosome + ' - ' + position) + ) + final_list.append(html.P('List of Targets found for the selected position')) + + if genome_type == 'ref': + col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Cluster Position' ,'Direction', 'Mismatches', 'Bulge Size', 'Total'] + col_type = ['text','text','text','text','numeric', 'numeric','text','numeric', 'numeric', 'numeric'] + file_to_grep = 'targets.cluster' + elif genome_type == 'var': + col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Cluster Position' ,'Direction', 
'Mismatches', 'Bulge Size', 'Total', 'Min_mismatches', 'Max_mismatches', 'PAM disruption'] + col_type = ['text','text','text','text','numeric', 'numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'text'] + file_to_grep = 'targets.cluster.minmaxdisr' + else: + col_list = ['Bulge Type', 'crRNA', 'DNA', 'Chromosome', 'Position', 'Cluster Position','Direction', 'Mismatches', 'Bulge Size', 'Total', 'Min_mismatches', 'Max_mismatches', 'PAM disruption', 'PAM creation', 'Variant unique', 'Samples'] + col_type = ['text','text','text','text','numeric','text','numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'text', 'text', 'text', 'text', 'text'] + + subprocess.call(['grep -P \'\\t'+ guide[0]+ '.*\\t.*\\t' + chromosome + '\\t.*\\t' + position + '\\t\' Results/' + job_id + '/' + job_id + '.' + file_to_grep + '.txt > Results/' + job_id + '/' + job_id + '.' + chromosome + '_' + position + '.txt'], shell = True) + + df = pd.read_csv('Results/' + job_id + '/' + job_id + '.' + chromosome + '_' + position + '.txt', sep = '\t', names = col_list) + cols = [{"name": i, "id": i, 'type':t, 'hideable':True} for i,t in zip(col_list, col_type)] + + final_list.append( + html.Div( + dash_table.DataTable( + id='table-position-target', + columns=cols, + data = df.to_dict('records'), + virtualization = True, + fixed_rows={ 'headers': True, 'data': 0 }, + #fixed_columns = {'headers': True, 'data':1}, + style_cell={'width': '150px'}, + page_current=0, + page_size=PAGE_SIZE, + page_action='custom', + sort_action='custom', + sort_mode='multi', + sort_by=[], + filter_action='custom', + filter_query='', + style_table={ + 'height': '600px' + #'overflowY': 'scroll', + }, + style_data_conditional=[ + { + 'if': { + 'filter_query': '{Variant unique} eq y', + #'column_id' :'{#Bulge type}', + #'column_id' :'{Total}' + }, + #'border-left': '5px solid rgba(255, 26, 26, 0.9)', + 'background-color':'rgba(255, 0, 0,0.15)'#'rgb(255, 102, 102)' + + } + ], + export_format = 'csv', + css= [{ 
'selector': 'td.cell--selected, td.focused', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;' }, { 'selector': 'td.cell--selected *, td.focused *', 'rule': 'background-color: rgba(0, 0, 255,0.15) !important;'}], + + ), + id = 'div-result-table', + ) + ) + return html.Div(final_list, style = {'margin':'1%'}) + +if __name__ == '__main__': + #app.run_server(debug=True) + app.run_server(host='0.0.0.0', debug=True, port=8080) + cache.clear() #delete cache when server is closed + + #BUG quando faccio scores, se ho dei char IUPAC nei targets, nel terminale posso vedere 150% 200% etc perche' il limite massimo e' basato su wc -l dei targets, ma possono aumentare se ho molti + #Iupac + #TODO se cancello il chr nel filter by position, non vedo nessun risultato -> fare in modo che metta risultati originali -> da controllare con risultati più grandi diff --git a/OldScripts/calc_samples.py b/OldScripts/calc_samples.py new file mode 100644 index 0000000..4dc439a --- /dev/null +++ b/OldScripts/calc_samples.py @@ -0,0 +1,174 @@ +''' +Carica il dizionario, per ogni linea del file result (top1): +estrae i samples e salva la posizione nel target e nel chr, la tupla (var, ref), +crea tutte le combinazioni in target_combination +Poi passa tutti gli elementi di target_combination e per ogni pos +dove c'era uno iupac controlla: se c'è un var, prende i samples e fa l'intersezione + +''' + +import gzip +import sys +import json +import time +import itertools +# argv1 is dict +# argv is result file +#Load .json dict +total_error = 0 +resu_name = sys.argv[2][:sys.argv[2].rfind('.')] + '.samples.txt' +chr_name = sys.argv[1].split('.json')[0].split('_')[-1] +if True: + start_time = time.time() + if True: + with open(sys.argv[1], 'r') as f: + datastore = json.load(f) + print ('Load done', time.time() - start_time) + #Use the new datastore datastructure + iupac_code = { + "R":("A", "G"), + "Y":("C", "T"), + "S":("G", "C"), + "W":("A", "T"), + "K":("G", "T"), + "M":("A", "C"), + 
"B":("C", "G", "T"), + "D":("A", "G", "T"), + "H":("A", "C", "T"), + "V":("A", "C", "G"), + "r":("A", "G"), + "y":("C", "T"), + "s":("G", "C"), + "w":("A", "T"), + "k":("G", "T"), + "m":("A", "C"), + "b":("C", "G", "T"), + "d":("A", "G", "T"), + "h":("A", "C", "T"), + "v":("A", "C", "G"), + 'N':('A', 'T', 'C', 'G') + } + total_error = 0 + with open (sys.argv[2]) as targets, open(resu_name,'a+') as result: + #header = targets.readline() + for line in targets: + if '#' in line: + continue + line = line.strip().split('\t') + #returned_none = False + pos_snp = [] + var = [] + target_combination = [] + pos_snp_chr = [] + if line[3] == chr_name: + set_list = [] + target_string = line[2] + if line[6] == '-': + target_string = target_string[::-1] + bulge_found = 0 + for pos, char in enumerate(target_string): + if char == '-': + bulge_found = bulge_found + 1 + if char in iupac_code: + iupac_pos = str(int(line[4]) + pos + 1 - bulge_found) + try: + a = (datastore[chr_name + ',' + iupac_pos]) #NOTE se non ha samples, ritorna ;ref,var + + #ref_char = a[-4:][1] + #var_char = a[-4:][3] + ref_char = a.split(';')[-1].split(',')[0] + var_char = a.split(';')[-1].split(',')[1] + # a = a[:-4] + a = a.split(';')[0] + pos_snp.append(pos) + pos_snp_chr.append(iupac_pos) + #var.append((ref_char,var_char)) + var.append((var_char, ref_char)) + except Exception as e: + print(e) + print('Error at ' + '\t'.join(line) + ', with char ' + char + ', at pos ', iupac_pos) + #sys.exit() + total_error = total_error + 1 + #print(set(a.split(','))) + if a: + set_list.append(set(a.split(','))) + else: + #print('Error None ', line, a) + set_list.append(set()) + #pass + #returned_none = True + + #Create all combinations + for i in itertools.product(*var): + t = list(target_string) + for p, el in enumerate(pos_snp): + t[el] = i[p] + target_combination.append(''.join(t)) + # print(target_combination) + + samples_already_assigned = set() + #print('QUI:', pos_snp_chr) + for t in target_combination: + set_list2 
= [] + final_result = line.copy() + for ele_pos,p in enumerate(pos_snp_chr): + #print('pos_chr', p) + a = (datastore[chr_name + ',' + p]) + #print('a', a) + + samples = a.split(';')[0] #a[:-4] + + ref = a.split(';')[-1].split(',')[0] + var = a.split(';')[-1].split(',')[1] + # if int(p) == 10353471 or int(p) == 10353474: + # print(p) + # print('final result', final_result) + # print('Samp, ref, var: ',samples, ref, var) + #print('char in pos',t[pos_snp[ele_pos]].upper()) + if t[pos_snp[ele_pos]].upper() == var: + if samples: + set_list2.append(set(samples.split(','))) + else: + #print('Error None ', line, a) + set_list2.append(set()) + # #returned_none = True + # if int(p) == 10353471 or int(p) == 10353474: + # print('Set list2', set_list2 ) + if set_list2: + #print('setlist2', set_list2) + common_samples = set.intersection(*set_list2) + common_samples = common_samples - samples_already_assigned + # print('common samples', common_samples) + samples_already_assigned = samples_already_assigned.union(common_samples) + # print('samp already assigned', samples_already_assigned) + # print('common_smples', common_samples) + if common_samples: + final_result.append(','.join(common_samples)) + else: + # final_result.append('No common samples') + final_result = [] #DO not save results without samples + else: + # final_result.append('No samples') #DO not save results without samples + final_result = [] + # print('final_res', final_result) + if line[6] == '-': + t = t[::-1] + if final_result: + final_result[2] = t + result.write('\t'.join(final_result) + '\n') + + #print(final_result) + + #Total intersection + # if set_list: + # common_samples = set.intersection(*set_list) + # if common_samples: + # line.append(','.join(common_samples)) + # else: + # line.append('No common samples') + # else: + # line.append('No samples') + # result.write('\t'.join(line) + '\n') + +print ('Done') +print (total_error) diff --git a/OldScripts/calc_samples_faster.py 
b/OldScripts/calc_samples_faster.py new file mode 100644 index 0000000..6b7e9bc --- /dev/null +++ b/OldScripts/calc_samples_faster.py @@ -0,0 +1,203 @@ +''' +In input prende top_1 ordinato per cromosomi. Prende la prima riga e carica il dizionario +corrispondente. Quando legge una nuova riga e il chr cambia, scarica il primo dizionario e +apre quello nuovo. Per ogni linea : +estrae i samples e salva la posizione nel target e nel chr, la tupla (var, ref), +crea tutte le combinazioni in target_combination +Poi passa tutti gli elementi di target_combination e per ogni pos +dove c'era uno iupac controlla: se c'è un var, prende i samples e fa l'intersezione + +''' +#Test, creo anche un file dove per ogni target ho l'unione dei samples, serve per l'annotazione + +import gzip +import sys +import json +import time +import itertools +import os +# argv1 is dict directory +# argv is result file +#Load .json dict +total_error = 0 +resu_name = sys.argv[2][:sys.argv[2].rfind('.')] + '.samples.txt' +test_resu_name = sys.argv[2][:sys.argv[2].rfind('.')] + '.samples.all.txt' #Put in a line all the samples, not the intersection, keep original line +chr_name = sys.argv[1].split('.json')[0].split('_')[-1] + +current_chr = 'no' +chr_name = 'no' + +def rev_comp(a): + if a == 'A' or a == 'a': + return 'T' + if a == 'T' or a == 't': + return 'A' + if a == 'C' or a == 'c': + return 'G' + return 'C' + +if True: + # start_time = time.time() + # if True: + # with open(sys.argv[1], 'r') as f: + # datastore = json.load(f) + # print ('Load done', time.time() - start_time) + #Use the new datastore datastructure + iupac_code = { + "R":("A", "G"), + "Y":("C", "T"), + "S":("G", "C"), + "W":("A", "T"), + "K":("G", "T"), + "M":("A", "C"), + "B":("C", "G", "T"), + "D":("A", "G", "T"), + "H":("A", "C", "T"), + "V":("A", "C", "G"), + "r":("A", "G"), + "y":("C", "T"), + "s":("G", "C"), + "w":("A", "T"), + "k":("G", "T"), + "m":("A", "C"), + "b":("C", "G", "T"), + "d":("A", "G", "T"), + "h":("A", "C", 
"T"), + "v":("A", "C", "G"), + 'N':('A', 'T', 'C', 'G') + } + total_error = 0 + with open (sys.argv[2]) as targets, open(resu_name,'w+') as result, open(test_resu_name, 'w+') as test_result: + for line in targets: + if '#' in line: + continue + line = line.strip().split('\t') + if line[3] != current_chr: + if not os.path.exists(os.path.realpath(sys.argv[1]) + '/my_dict_' + line[3] + '.json'): + test_result.write('\t'.join(line) + '\t' + 'n' + '\t' +line[1].replace('-','') + '\n') + continue + print('Done ', current_chr) + current_chr = line[3] + chr_name = line[3] + with open(os.path.realpath(sys.argv[1]) + '/my_dict_' + current_chr + '.json', 'r') as f: + start_time = time.time() + datastore = json.load(f) + print ('Load ' + current_chr + ' done', time.time() - start_time) + + pos_snp = [] + var = [] + target_combination = [] + pos_snp_chr = [] + # if line[3] == chr_name: + set_list = [] + target_string = line[2] + if line[6] == '-': + target_string = target_string[::-1] + bulge_found = 0 + for pos, char in enumerate(target_string): + if char == '-': + bulge_found = bulge_found + 1 + if char in iupac_code: + iupac_pos = str(int(line[4]) + pos + 1 - bulge_found) + try: + a = (datastore[chr_name + ',' + iupac_pos]) #NOTE se non ha samples, ritorna ;ref,var + + #ref_char = a[-4:][1] + #var_char = a[-4:][3] + ref_char = a.split(';')[-1].split(',')[0] + var_char = a.split(';')[-1].split(',')[1] + if line[6] == '-': + ref_char = rev_comp(ref_char) + var_char = rev_comp(var_char) + # a = a[:-4] + a = a.split(';')[0] + pos_snp.append(pos) + pos_snp_chr.append(iupac_pos) + #var.append((ref_char,var_char)) + var.append((var_char, ref_char)) + except Exception as e: + print(e) + print('Error at ' + '\t'.join(line) + ', with char ' + char + ', at pos ', iupac_pos) + #sys.exit() + total_error = total_error + 1 + #print(set(a.split(','))) + if a: + set_list.append(set(a.split(','))) + else: + #print('Error None ', line, a) + set_list.append(set()) + #pass + #returned_none = 
True + #TEST save line and union of all samples + union_sample = list(set().union(*set_list)) + if union_sample: + test_result.write('\t'.join(line) + '\t' + ','.join(union_sample) + '\t' + line[1].replace('-','') +'\n') + else: + test_result.write('\t'.join(line) + '\t' + 'n' + '\t' +line[1].replace('-','') + '\n') + #Create all combinations + for i in itertools.product(*var): + t = list(target_string) + for p, el in enumerate(pos_snp): + t[el] = i[p] + target_combination.append(''.join(t)) + # print(target_combination) + + samples_already_assigned = set() + + #print('QUI:', pos_snp_chr) + for t in target_combination: + set_list2 = [] + final_result = line.copy() + for ele_pos,p in enumerate(pos_snp_chr): + #print('pos_chr', p) + a = (datastore[chr_name + ',' + p]) + #print('a', a) + + samples = a.split(';')[0] #a[:-4] + + ref = a.split(';')[-1].split(',')[0] + var = a.split(';')[-1].split(',')[1] + if line[6] == '-': + ref = rev_comp(ref) + var = rev_comp(var) + # if int(p) == 10353471 or int(p) == 10353474: + # print(p) + # print('final result', final_result) + # print('Samp, ref, var: ',samples, ref, var) + #print('char in pos',t[pos_snp[ele_pos]].upper()) + if t[pos_snp[ele_pos]].upper() == var: + if samples: + set_list2.append(set(samples.split(','))) + else: + #print('Error None ', line, a) + set_list2.append(set()) + # #returned_none = True + # if int(p) == 10353471 or int(p) == 10353474: + # print('Set list2', set_list2 ) + if set_list2: + #print('setlist2', set_list2) + common_samples = set.intersection(*set_list2) + common_samples = common_samples - samples_already_assigned + # print('common samples', common_samples) + samples_already_assigned = samples_already_assigned.union(common_samples) + # print('samp already assigned', samples_already_assigned) + # print('common_smples', common_samples) + if common_samples: + final_result.append(','.join(common_samples)) + else: + # final_result.append('No common samples') + final_result = [] #DO not save results 
without samples + else: + # final_result.append('No samples') #DO not save results without samples + final_result = [] + # print('final_res', final_result) + if line[6] == '-': + t = t[::-1] + if final_result: + final_result[2] = t + result.write('\t'.join(final_result) + '\t' + final_result[1].replace('-','') + '\n') #final_result[1].replace('-','') for better grep + + #print(final_result) + +print ('Done') +print (total_error) diff --git a/OldScripts/cluster.dict.old.py b/OldScripts/cluster.dict.old.py new file mode 100644 index 0000000..e683ad4 --- /dev/null +++ b/OldScripts/cluster.dict.old.py @@ -0,0 +1,142 @@ +#test1 +#script per testare la lettura del file semicommon con pandas, creare una nuova colonna total, chr, pos corretta (ovver la pos senza i bulges), +# e vedere se è fattibile in termini di tempo e memoria -> non fattibile in termini di tempo + +#test2 +#Leggo il file riga per riga, aggiungo le colonne e salvo la lista in una lista, che poi ordino -> ok, 1gb input -> 1.5 min e 11 gb ram +# Input 1.2 Gb -> tempo 2 min; 11Gb RAM + +#test 3 +#Come test2, ma ogni lista è salvata in un dizionario con key = guide, per risolvere il problema di avere più guide in uno stesso cluster. 
+#Ora se in una posizione ho due guide, creo due cluster separati +# Input 2.8 Gb -> 25 gb ram +# Problema con 5 gb input -> forse servono 50 Gb ram + +#sys1 è target file +#sys2 is 'addGuide' or 'no' -> only for web server, only for search with only ref +#sys3 is True to keep column 5 (Pos cluster) and 9 (Total) and added guide, False to do clusterization but do not report the added columns +#sys4 is True if cluster only (no append of Total column or adding of Cluster position, because already present), False otherwise +#Output column (not written): Bulge_type, Guide, Target, chr, pos, pos_cluster (optional), direction, mms, bulge, total(optional), real guide(optional) + +import time +import sys + +start = time.time() +total_targets = [] +guides_dict = dict() +addGuide = False #Add real guide to last column for better grep +if 'addGuide' in sys.argv[:]: + addGuide = True +if sys.argv[3] == 'True': + keep_columns = True +else: + keep_columns = False + +result_name = sys.argv[1][:sys.argv[1].rfind('.')] + '.cluster.txt' +cluster_only = False +if sys.argv[4] == 'True': + cluster_only = True + +with open (sys.argv[1]) as targets: + for line in targets: + if '#' in line: + continue + line = line.strip().split('\t') + if not cluster_only: + line.append(str(int(line[6]) + int(line[7]))) + if line[5] == '+': + if line[0] == 'DNA': + # line.append(str(int(line[4]) + int(line[7]))) + line.insert(5, str(int(line[4]) + int(line[7]))) + else: + # line.append(str(int(line[4]) - int(line[7]))) + line.insert(5,str(int(line[4]) - int(line[7]))) + else: + # line.append(line[4]) + line.insert(5, line[4]) + try: + guides_dict[line[1].replace('-','')].append(line) + except: + guides_dict[line[1].replace('-','')] = [line] + #total_targets.append(line) + +if not cluster_only: + print('Created \'Total\' and \'Position Cluster\' columns:', time.time() - start) +else: + print('Loaded targets: ', time.time() - start) + + + +start = time.time() +# total_targets.sort(key = lambda x: ( x[3] , 
int(x[-1]) )) +for k in guides_dict.keys(): + guides_dict[k].sort(key = lambda x: ( x[3] , int(x[5]) )) +#total_targets.sort(key = lambda x: ( x[3] , int(x[5]) )) + +print('Targets sorted:', time.time() - start) + +print('Start clustering') +start_time = time.time() + +with open(result_name, 'w+') as result: + if 'total' not in sys.argv[:]: #TODO fix for offline release, praticamente se sto facendo cluster su total.txt metto l'header custom + if addGuide: + if keep_columns: + result.write('#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tCluster Position\tDirection\tMismatches\tBulge_Size\tTotal\tReal Guide\n') + else: + result.write('#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tDirection\tMismatches\tBulge_Size\tReal Guide\n') + else: + if keep_columns: + result.write('#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tCluster Position\tDirection\tMismatches\tBulge_Size\tTotal\n') + else: + result.write('#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tDirection\tMismatches\tBulge_Size\n') + else: + result.write('#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tCluster Position\tDirection\tMismatches\tBulge_Size\tTotal\tMin_mismatches\tMax_mismatches\tPam_disr\tPAM_gen\tVar_uniq\n') + + total_targets = [] + for k in guides_dict.keys(): + total_targets += guides_dict[k] + total_list = [] + + first_line = total_targets[0] + # current_chr_pos = first_line[3] + ' ' + first_line[9] + current_chr_pos = first_line[3] + ' ' + first_line[5] + + total_list.append([first_line]) + + for line in total_targets[1:]: + #if line[3] + ' ' + line[9] != current_chr_pos: + if line[3] + ' ' + line[5] != current_chr_pos: + # total_list[-1].sort(key = lambda x: int(x[8])) + total_list[-1].sort(key = lambda x: int(x[9])) + total_list.append([line]) + # current_chr_pos = line[3] + ' ' + line[9] + current_chr_pos = line[3] + ' ' + line[5] + else: + total_list[-1].append(line) + + total_list[-1].sort(key = lambda x: int(x[9])) + + total_list.sort(key = lambda x: int(x[0][9])) + if addGuide: + if 
keep_columns: + for cluster in total_list: + for target in cluster: + result.write('\t'.join(target) + '\t' + target[1].replace('-','') + '\n') + else: + for cluster in total_list: + for target in cluster: + result.write('\t'.join(target[0:5] + target[6:-1]) + '\t' + target[1].replace('-','') + '\n') + + else: + if keep_columns: + for cluster in total_list: + for target in cluster: + result.write('\t'.join(target) + '\n') + else: + for cluster in total_list: + for target in cluster: + result.write('\t'.join(target[0:5] + target[6:-1]) + '\n') + + +print("Clustering runtime: %s seconds" % (time.time() - start_time)) \ No newline at end of file diff --git a/OldScripts/ext_seq_pam_creation.sh b/OldScripts/ext_seq_pam_creation.sh new file mode 100644 index 0000000..732791e --- /dev/null +++ b/OldScripts/ext_seq_pam_creation.sh @@ -0,0 +1,12 @@ +# $1 is genome directory +# $2 is bedfile.bed +mkdir tmp_seq +for chr in $1/*.fa; do + chr_name=$(basename $chr) + bedtools getfasta -fi $chr -bed $2 -fo tmp_seq/$chr_name'_seq.txt' 2>/dev/null +done +find ./tmp_seq -type f -empty -delete + +for extr in tmp_seq/*_seq.txt; do + sed -i '1~2d' $extr +done \ No newline at end of file diff --git a/OldScripts/extract_subcluster.awk b/OldScripts/extract_subcluster.awk new file mode 100644 index 0000000..760ed38 --- /dev/null +++ b/OldScripts/extract_subcluster.awk @@ -0,0 +1,6 @@ + {if ($8==mm && $9==b) + if (($4$6)==g) + print $0"\ts"; + else { + print $0"\tt"; g=$4$6;}} + diff --git a/OldScripts/generate_report_samples.py b/OldScripts/generate_report_samples.py new file mode 100644 index 0000000..c63f573 --- /dev/null +++ b/OldScripts/generate_report_samples.py @@ -0,0 +1,780 @@ +#!/usr/bin/env python + + +''' +Not used +''' +# Esempio di input: +#python 3 new_radar_chart GUIDE -mm 4 -profile profile.xls -extprofile extprofile.xls -files ... ... ... ... -> creo radar chart +#python 3 new_radar_chart GUIDE -mm 4 -profile profile.xls -extprofile extprofile.xls -files ... ... ... ... 
-sumref ... -sumenr ... -> creo radar chart + barplot di quella guida +#python 3 new_radar_chart -mm 4 -sumref ... -sumenr ... -> creo barplot totale +# Input: +# guide, mm, profile, extprofile, annotation_file, summaryone, summarytwo, gecko profile, gecko exons, gecko introns, gecko promoter, gecko dnase, geckoctcf + +#BUG 428 quando guida non ha trovato targets +# Libraries +import math +import matplotlib +matplotlib.use("TkAgg") +from matplotlib import pyplot as plt +from matplotlib import patches as mpatches +import pandas as pd +from math import pi +import scipy.spatial.distance as sp +import numpy as np +import sys +from itertools import islice +import glob +import warnings +import os +from os import listdir +from os.path import isfile, join +warnings.filterwarnings("ignore") + +plt.style.use('seaborn-poster') +matplotlib.rcParams['pdf.fonttype'] = 42 +matplotlib.rcParams['ps.fonttype'] = 42 + +only_radar = False +radar_barplot = False +only_barplot = False + +guide = sys.argv[1] +if '-' in sys.argv[2]: + missmatch = sys.argv[2].split('-') + lowermm = int(missmatch[0]) + uppermm = int(missmatch[1]) +else: + missmatch = int(sys.argv[2]) + lowermm = 0 + uppermm = missmatch + +profile_file = sys.argv[3] +ext_profile_file = sys.argv[4] +count_dir = sys.argv[5] +summary_one = sys.argv[6] +summary_two = sys.argv[7] #is the same as count_dir + +try: + gecko_profile_file = sys.argv[8] + gecko_exons = sys.argv[9] + gecko_introns = sys.argv[10] + gecko_promoter = sys.argv[11] + gecko_dnase = sys.argv[12] + gecko_ctcf = sys.argv[13] +except: + pass + +if guide == 'no': + only_barplot = True +elif summary_one != 'no' and summary_two != 'no': + radar_barplot = True +else: + only_radar = True + + +# lettura file +with open(profile_file) as profile: + num_guides = profile.read().strip().split('\n')[1:] + num_guides = len (num_guides) +with open(count_dir) as annotation_file: + content = annotation_file.read().strip() + onlyfiles = 
content.split('-Summary_Total\n')[0].split('-') + summary_two = content.split('-Summary_Total\n')[1] + if guide == 'no': + summary_two = summary_two.strip().split('-')[0].strip().split('\n') + + else: + summary_two = summary_two.strip().split('-Summary_')[1:] + summary_two = [s.strip() for s in summary_two if guide in s] + if not summary_two: + print('Warning! The selected guide has no annotated results') + quit() + summary_two = summary_two[0].split('\n')[1:] + + +if summary_one != 'no': + with open(summary_one) as annotation_file: + content = annotation_file.read().strip() + summary_one = content.split('-Summary_Total\n')[1] + if guide == 'no': + summary_one = summary_one.strip().split('-')[0].strip().split('\n') + else: + summary_one = summary_one.strip().split('-Summary_')[1:] + summary_one = [s.strip() for s in summary_one if guide in s] + summary_one = summary_one[0].split('\n')[1:] + +count_files = [] +inGuidesProfileExtended = open(ext_profile_file, 'r') +inGuidesProfile = open(profile_file, 'r') +onlyfiles.sort() + +for i in onlyfiles: + if i == '': + continue + i = i.strip() + element_name = i[:i.find('\n')] #i[i[:i.find('Count')].rfind('.') +1 : i.find('Count')] + count_files.append([element_name, i.split('\n')[1:]]) + +check_annotation_name = [] +for i in count_files: + check_annotation_name.append(i[0].lower()) + +if check_annotation_name != ['ctcf', 'dnase', 'exon', 'intron', 'promoter'] and gecko_profile_file != "no": #NOTE if gecko annotations are updated, update the list adding the new annotations in lexico order + print('Warning! Option \'-gecko\' can be used on files annotated with \'ctcf\', \'dnase\', \'exon\', \'intron\', \'promoter\'\nRemoving \'-gecko\' option') + gecko_profile_file = 'no' +#count_files is now ['Element', [first_guide, second_guide...]] ... 
+ +# if exonsCountFile != "no": +# inExonsCountFile = open(exonsCountFile, "r") +# if intronsCountFile != "no": +# inIntronsCountFile = open(intronsCountFile, "r") +# if promotersCountFile != "no": +# inPromotersCountFile = open(promotersCountFile, "r") +# if DNAseCountFile != "no": +# inDNAseCountFile = open(DNAseCountFile, "r") +# if DNAseCountFile != "no": +# inCTCFCountFile = open(CTCFCountFile, "r") + +# lists for data storing and analysis +guidesExtendedProfile = [] + +# global counting for annotation types + +profileMissmatchGlobal = [] +# exonsMissmatchGlobal = [] +# intronsMissmatchGlobal = [] +# promotersMissmatchGlobal = [] +# DNAseMissmatchGlobal = [] +# CTCFMissmatchGlobal = [] + +arraySummaryCountOne = [] +arraySummaryCountTwo = [] + + +if summary_one != "no" and summary_two != "no": + inSummaryCountOne = summary_one #open(summary_one, "r") + inSummaryCountTwo = summary_two #open(summary_two, "r") + + x_ticks_labels = [] + n_annotation = 0 + mms_total = 0 #len of the array exon 0 0 0 0 0 0 0, taken from [1:] + for line in inSummaryCountOne: + x = line.strip().split('\t') + arraySummaryCountOne.append(tuple(x[1:])) + x_ticks_labels.append(x[0]) + n_annotation = n_annotation + 1 + mms_total = len(x[1:]) + + + for line in inSummaryCountTwo: + x = line.strip().split('\t') + arraySummaryCountTwo.append(tuple(x[1:])) + arraySummaryCountOne = np.array(arraySummaryCountOne, dtype=int) + arraySummaryCountOne.shape = (n_annotation, mms_total) + + arraySummaryCountTwo = np.array(arraySummaryCountTwo, dtype=int) + arraySummaryCountTwo.shape = (n_annotation, mms_total) + + percentageGain = [] + for row in range(0, n_annotation): + for col in range(0, uppermm+1): + res = max((arraySummaryCountOne[row, col]/arraySummaryCountTwo[row, col]), + (arraySummaryCountTwo[row, col]/arraySummaryCountOne[row, col])) + percentageGain.append(res) + percentageGain = np.array(percentageGain, dtype=float) + percentageGain.shape = (n_annotation, uppermm+1) + + intergenicGainOne = 
(np.sum(arraySummaryCountOne, axis=0)) + intergenicGainTwo = (np.sum(arraySummaryCountTwo, axis=0)) + intergenicGainOne.shape = (1, mms_total) + intergenicGainTwo.shape = (1, mms_total) + intergenicGainOne = intergenicGainOne - arraySummaryCountOne[0, :] + intergenicGainTwo = intergenicGainTwo - arraySummaryCountTwo[0, :] + + # p1 = plt.bar(1, percentageGain[0, uppermm-1]) + # p2 = plt.bar(2, percentageGain[1, uppermm-1]) + # p3 = plt.bar(3, percentageGain[2, uppermm-1]) + # p4 = plt.bar(4, percentageGain[3, uppermm-1]) + # p5 = plt.bar(5, percentageGain[4, uppermm-1]) + # p6 = plt.bar(6, percentageGain[5, uppermm-1]) + # p7 = plt.bar(7, max((intergenicGainOne[0, uppermm-1]/intergenicGainTwo[0, uppermm-1]), + # (intergenicGainTwo[0, uppermm-1]/intergenicGainOne[0, uppermm-1]))) + + ind = np.arange(0, n_annotation, 1) + # print('math ceil', np.arange(0, max(arraySummaryCountTwo[:, uppermm]) + math.ceil(max( + # arraySummaryCountTwo[:, uppermm])/10), + # math.ceil(max(arraySummaryCountTwo[:, uppermm])/5))) #BUG if max(arraySummaryCountTwo[:, uppermm], then arange (0,0,0) + no_result = False + try: + y_range = np.arange(0, max(arraySummaryCountTwo[:, uppermm]) + math.ceil(max( + arraySummaryCountTwo[:, uppermm])/10), math.ceil(max(arraySummaryCountTwo[:, uppermm])/5)) + except: + y_range = np.arange(0,1,1) + no_result = True + width = 0.5 + + p1 = plt.bar( + ind, arraySummaryCountOne[:, uppermm], width, color='#67a9cf', align='edge') + p2 = plt.bar(ind, (arraySummaryCountTwo[:, uppermm]-arraySummaryCountOne[:, uppermm]), + width, bottom=arraySummaryCountOne[:, uppermm], color='#ef8a62', align='edge') + + plt.legend((p1[0], p2[0]), ('Reference Genome', + 'Enriched Genome'), fontsize=30) + + # plt.xlim(0, len(string)) + # plt.set_ylim([1, 1.5]) + plt.title('Relative Increase Enriched/Reference Genome with ' + + str(uppermm) + ' Mismatches', size=25) + # plt.xlabel('Annotations') + if no_result: + plt.annotate('No targets found with ' + str(missmatch) + ' mismatches', 
[1.35,0], size = 22) #NOTE with 0-mm print only the mm pdf; 1.35 modificare se cambia la str + else: + for k in range(0, n_annotation): + plt.annotate('%.2fx' % percentageGain[k, uppermm], [ + k+0.05, arraySummaryCountTwo[k, uppermm]+(max(arraySummaryCountTwo[:, uppermm])/100)], size=22) + # plt.ylim([0, max(arraySummaryCountTwo[:, uppermm])+2000],size=25) + + + plt.xticks(ind+0.25, x_ticks_labels, size=25) + plt.yticks(y_range, size=22) + + plt.tight_layout() + plt.subplots_adjust(top=0.95, bottom=0.06, left=0.1, right=0.99) + if guide != 'no': + plt.savefig("summary_histogram_" + str(guide) + '_' + str(uppermm) + + "mm" + ".pdf", format="pdf") + else: + plt.savefig("summary_histogram_" + str(uppermm) + + "mm" + ".pdf", format="pdf") + if guide != 'no': + plt.savefig("summary_histogram_" + str(guide) + '_' + str(uppermm) + + "mm" + ".png", format="png") + else: + plt.savefig("summary_histogram_" + str(uppermm) + + "mm" + ".png", format="png") + + +if guide != 'no': + # reading extendend profile to obtain results over mismatches counts + for line in inGuidesProfileExtended: + if ">" + guide in line: + # print(line) + next(inGuidesProfileExtended) + # line=inGuidesProfileExtended.readline() + for ciao in range(0, uppermm+1): + line = inGuidesProfileExtended.readline() + count = 0 + x = line.split('\t') + guidesExtendedProfile.append((x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], + x[10], x[11], x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20])) + for line in inGuidesProfileExtended: + if count < 6: + line = line.rstrip() + x = line.split('\t') + #y = str(x[20]).split('\n') + guidesExtendedProfile.append((x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], + x[10], x[11], x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20])) + count += 1 + else: + break + break + arrayguidesExtendedProfile = np.array(guidesExtendedProfile, dtype=int) + + arrayguidesExtendedProfile.shape = (7*((uppermm-0)+1), 20) + + # reading profile file to obtain 
results for every mismatch count in the general profile + guide_len = len(guide) + next(inGuidesProfile) + for line in inGuidesProfile: + line += "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + \ + "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + x = line.split('\t') + profileMissmatchGlobal.append((x[guide_len+3], x[guide_len+4], x[guide_len+5], x[guide_len+6], + x[guide_len+7], x[guide_len+8], x[guide_len+9], x[guide_len+10], x[guide_len+11], x[guide_len+12])) + if str(x[0]) == guide: + arrayprofileMissmatch = np.array((x[guide_len+3], x[guide_len+4], x[guide_len+5], x[guide_len+6], + x[guide_len+7], x[guide_len+8], x[guide_len+9], x[guide_len+10], x[guide_len+11], x[guide_len+12]), dtype=int) + + arrayexonsMissmatch = [] + exonsMissmatchGlobal = [] + for pos,element_file in enumerate(count_files): + exonsMissmatchGlobal.append([]) + arrayexonsMissmatch.append([]) + for line in element_file[1]: + line = line.strip() + #line += "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + \ + # "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + x = line.split('\t') + exonsMissmatchGlobal[pos].append(tuple(x[1:])) + if str(x[0]) == guide: + arrayexonsMissmatch[pos].append(np.array( tuple(x[1:]), dtype=int)) + + if(gecko_profile_file != "no"): #NOTE if new annotations files are provided, add them in lexicograph order, following the structure for ctcf, dnase etc etc + inGuidesProfile = open(gecko_profile_file, "r") + inExonsCountFile = open(gecko_exons, "r") + inIntronsCountFile = open(gecko_introns, "r") + inPromotersCountFile = open(gecko_promoter, "r") + inDNAseCountFile = open(gecko_dnase, "r") + inCTCFCountFile = open(gecko_ctcf, "r") + + profileMissmatchGlobal.clear() + # exonsMissmatchGlobal.clear() + # intronsMissmatchGlobal.clear() + # promotersMissmatchGlobal.clear() + # DNAseMissmatchGlobal.clear() + # CTCFMissmatchGlobal.clear() + exonsMissmatchGlobal = [] + # GECKO + # reading profile file to obtain results for every mismatch count in the general profile + guide_len = len(guide) + 
next(inGuidesProfile) + for line in inGuidesProfile: + line += "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + \ + "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + x = line.split('\t') + profileMissmatchGlobal.append((x[guide_len+3], x[guide_len+4], x[guide_len+5], x[guide_len+6], + x[guide_len+7], x[guide_len+8], x[guide_len+9], x[guide_len+10], x[guide_len+11], x[guide_len+12])) + + # reading every count file to obtain results for the ecdf and percentile count for annotated type + tmp = [] + for line in inCTCFCountFile: + line += "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + \ + "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + x = line.split('\t') + tmp.append( + (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10])) + exonsMissmatchGlobal.append(tmp) + tmp = [] + for line in inDNAseCountFile: + line += "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + \ + "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + x = line.split('\t') + tmp.append( + (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10])) + exonsMissmatchGlobal.append(tmp) + tmp = [] + for line in inExonsCountFile: + line += "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + \ + "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + x = line.split('\t') + tmp.append( + (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10])) + exonsMissmatchGlobal.append(tmp) + tmp = [] + for line in inIntronsCountFile: + line += "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + \ + "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + x = line.split('\t') + tmp.append( + (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10])) + exonsMissmatchGlobal.append(tmp) + tmp = [] + for line in inPromotersCountFile: + line += "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + \ + "0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t"+"0"+"\t" + x = line.split('\t') + tmp.append( + (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10])) + exonsMissmatchGlobal.append(tmp) + + + + if profile_file != "no": + arrayexonsMissmatchGlobal = [] + arrayprofileMissmatchGlobal = np.array(profileMissmatchGlobal, 
dtype=int) + for elem in exonsMissmatchGlobal: + arrayexonsMissmatchGlobal.append(np.array(elem, dtype = int)) + # arrayexonsMissmatchGlobal = np.array(exonsMissmatchGlobal, dtype=int) + # arrayintronsMissmatchGlobal = np.array(intronsMissmatchGlobal, dtype=int) + # arraypromotersMissmatchGlobal = np.array(promotersMissmatchGlobal, dtype=int) + # arrayDNAseMissmatchGlobal = np.array(DNAseMissmatchGlobal, dtype=int) + # arrayCTCFMissmatchGlobal = np.array(CTCFMissmatchGlobal, dtype=int) + distances = [] + test_distances = [] + Generaldistance = [[0 for x in range(0, uppermm+1)] + for y in range(np.size(arrayprofileMissmatchGlobal, 0))] + for elem in exonsMissmatchGlobal: + distances.append([[0 for x in range(0, uppermm+1)] for y in range(np.size(elem, 0))] ) + test_distances.append([[0 for x in range(0, uppermm+1)] for y in range(np.size(elem, 0))] ) + + # CTCFdistance = [[0 for x in range(0, uppermm+1)] + # for y in range(np.size(arrayCTCFMissmatchGlobal, 0))] + # Exonsdistance = [[0 for x in range(0, uppermm+1)] + # for y in range(np.size(arrayCTCFMissmatchGlobal, 0))] + # Intronsdistance = [[0 for x in range(0, uppermm+1)] + # for y in range(np.size(arrayCTCFMissmatchGlobal, 0))] + # Promotersdistance = [[0 for x in range(0, uppermm+1)] + # for y in range(np.size(arrayCTCFMissmatchGlobal, 0))] + # Generaldistance = [[0 for x in range(0, uppermm+1)] + # for y in range(np.size(arrayCTCFMissmatchGlobal, 0))] + # DNAsedistance = [[0 for x in range(0, uppermm+1)] + # for y in range(np.size(arrayCTCFMissmatchGlobal, 0))] + for i in range(0, uppermm+1): + + for pos, eleme in enumerate(arrayexonsMissmatchGlobal): + arrayexonsMissmatchGlobal[pos][:,i] = np.sort( eleme[:,i], axis=None) + + # arrayCTCFMissmatchGlobal[:, i] = np.sort( + # arrayCTCFMissmatchGlobal[:, i], axis=None) + arrayprofileMissmatchGlobal[:, i] = np.sort( + arrayprofileMissmatchGlobal[:, i], axis=None) + + # arrayintronsMissmatchGlobal[:, i] = np.sort( + # arrayintronsMissmatchGlobal[:, i], axis=None) + 
# arraypromotersMissmatchGlobal[:, i] = np.sort( + # arraypromotersMissmatchGlobal[:, i], axis=None) + # arrayDNAseMissmatchGlobal[:, i] = np.sort( + # arrayDNAseMissmatchGlobal[:, i], axis=None) + # arrayexonsMissmatchGlobal[:, i] = np.sort( + # arrayexonsMissmatchGlobal[:, i], axis=None) + + for k in range(0, np.size(arrayexonsMissmatchGlobal[0], 0)): + for pos, elem in enumerate(arrayexonsMissmatchGlobal): + distances[pos][k][i] = abs (arrayexonsMissmatch[pos][0][i] - elem[k,i]) + + + for pos, elem in enumerate(arrayexonsMissmatchGlobal): + test_distances[2][k][i] = abs (arrayexonsMissmatch[2][0][i] - arrayexonsMissmatchGlobal[0][k,i]) + + # CTCFdistance[k][i] = abs( + # arrayCTCFMissmatch[i]-arrayCTCFMissmatchGlobal[k, i]) + # Exonsdistance[k][i] = abs( + # arrayexonsMissmatch[i]-arrayexonsMissmatchGlobal[k, i]) + # Intronsdistance[k][i] = abs( + # arrayintronsMissmatch[i]-arrayintronsMissmatchGlobal[k, i]) + # Promotersdistance[k][i] = abs( + # arraypromotersMissmatch[i]-arraypromotersMissmatchGlobal[k, i]) + Generaldistance[k][i] = abs( + arrayprofileMissmatch[i]-arrayprofileMissmatchGlobal[k, i]) + # DNAsedistance[k][i] = abs( + # arrayDNAseMissmatch[i]-arrayDNAseMissmatchGlobal[k, i]) + array_distances = [] + for pos, elem in enumerate(distances): + array_distances.append(np.array(elem, dtype=int)) + # arrayCTCFdistance = np.array(CTCFdistance, dtype=int) + # arrayExonsFdistance = np.array(Exonsdistance, dtype=int) + # arrayIntronsdistance = np.array(Intronsdistance, dtype=int) + # arrayPromotersdistance = np.array(Promotersdistance, dtype=int) + arrayProfiledistance = np.array(Generaldistance, dtype=int) + # arrayDNAsedistance = np.array(DNAsedistance, dtype=int) + + if profile_file != "no": + # SINGLE MISMATCH COUNT + if len(sys.argv[2]) == 1: #mm inserted is single number + # Set data + data_for_df = {'group': ['A'], 'General': [np.argmin(arrayProfiledistance[:, uppermm])/np.size(arrayexonsMissmatchGlobal[0], 0)]} + for pos, el in 
enumerate(array_distances): + data_for_df[count_files[pos][0]] = [np.argmin(el[:, uppermm])/np.size(arrayexonsMissmatchGlobal[0], 0) ] + # df = pd.DataFrame({ + # 'group': ['A'], + # 'Exons': [np.argmin(arrayExonsFdistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'General': [np.argmin(arrayProfiledistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'Introns': [np.argmin(arrayIntronsdistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'Promoters': [np.argmin(arrayPromotersdistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'DNAse': [np.argmin(arrayDNAsedistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'CTCF': [np.argmin(arrayCTCFdistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0)] + # }) + df = pd.DataFrame(data_for_df) + # number of variable + categories = list(df)[1:] + N = len(categories) + + # We are going to plot the first line of the data frame. + # But we need to repeat the first value to close the circular graph: + values = df.loc[0].drop('group').values.flatten().tolist() + values += values[:1] + + # What will be the angle of each axis in the plot? 
(we divide the plot / number of variable) + angles = [n / float(N) * 2 * pi for n in range(N)] + angles += angles[:1] + + # Initialise the spider plot + ax = plt.subplot(2, 2, 1, polar=True) + # ax=plt.subplot(1, 1, 1, polar=True) + # plt.title('RADAR CHART') + + #labels = ['CTCF', 'DNAse', 'Exons', 'General', 'Introns', 'Promoters'] + labels = list(df.columns.values[1:]) + # Draw one axe per variable + add labels labels yet + plt.xticks(angles[:-1], labels, color='black', size=18) + for label, rot in zip(ax.get_xticklabels(), angles): + if (rot == 0): + label.set_horizontalalignment("center") + if (rot > 0): + label.set_horizontalalignment("left") + if (rot > 3): + label.set_horizontalalignment("center") + if (rot > 4): + label.set_horizontalalignment("right") + + # offset posizione y-axis + ax.set_theta_offset(pi / 2) + ax.set_theta_direction(-1) + + # Draw ylabels + ax.set_rlabel_position(0) + plt.yticks([0, 0.25, 0.50, 0.75, 1], ["0", "0.25", + "0.50", "0.75"], color="black", size=18) + plt.ylim(0, 1) + + # Plot data + ax.plot(angles, values, linewidth=1, linestyle='solid') + + # Fill area + ax.fill(angles, values, 'b', alpha=0.1) + + columns = ('Position', '# Targets') + #rows = ('General', 'Exons', 'Introns', 'Promoters', 'DNAse', 'CTCF') + rows = ['General'] + for el in count_files: + rows.append(el[0]) + rows = list(rows) + table_distances= [] + for pos, el in enumerate(array_distances): + table_distances.append(round(np.argmin( + el[:, uppermm])/np.size(arrayexonsMissmatchGlobal[0], 0), 2)) + # exons_distance = round(np.argmin( + # arrayExonsFdistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0), 2) + # introns_distance = round(np.argmin( + # arrayIntronsdistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0), 2) + general_distance = round(np.argmin( + arrayProfiledistance[:, uppermm])/np.size(arrayexonsMissmatchGlobal[0], 0), 2) + # promoters_distance = round(np.argmin( + # arrayPromotersdistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0), 
2) + # dnase_distance = round(np.argmin( + # arrayDNAsedistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0), 2) + # ctcf_distance = round(np.argmin( + # arrayCTCFdistance[:, uppermm])/np.size(arrayCTCFMissmatchGlobal, 0), 2) + + temp_vstack = np.vstack([f[0][uppermm] for f in arrayexonsMissmatch]) + offtarget_data = np.vstack((arrayprofileMissmatch[uppermm], temp_vstack)) + + #offtarget_data = np.vstack((arrayprofileMissmatch[uppermm], arrayexonsMissmatch[uppermm], arrayintronsMissmatch[uppermm], + # arraypromotersMissmatch[uppermm], arrayDNAseMissmatch[uppermm], arrayCTCFMissmatch[uppermm])) + # distance_data = np.vstack((general_distance, exons_distance, + # introns_distance, promoters_distance, dnase_distance, ctcf_distance)) + temp_vstack = np.vstack([f for f in table_distances]) + distance_data = np.vstack((general_distance, temp_vstack)) + table_data = np.concatenate((distance_data, offtarget_data), axis=1) + + plt.subplot(2, 2, 2) + table = plt.table(cellText=table_data, rowLabels=rows, + colLabels=columns, loc='center', colWidths=[0.35 for x in columns]) + table.auto_set_font_size(False) + table.set_fontsize(18) + table.scale(1, 3) + plt.axis('off') + plt.savefig("test.summary_single_guide_" + str(guide) + + "_"+str(uppermm) + "mm" + ".png", format="png") + quit() + + datacount = arrayguidesExtendedProfile[missmatch*7] / \ + (max(arrayguidesExtendedProfile[missmatch*7])) + data = np.array(datacount, dtype=float) + data = np.around(data, decimals=1) + data.shape = (1, len(datacount)) + + string = guide[0:20] + strArray = np.array([list(string)]) + + A = arrayguidesExtendedProfile[missmatch*7+1] / \ + (max(arrayguidesExtendedProfile[missmatch*7])) + C = arrayguidesExtendedProfile[missmatch*7+2] / \ + (max(arrayguidesExtendedProfile[missmatch*7])) + G = arrayguidesExtendedProfile[missmatch*7+3] / \ + (max(arrayguidesExtendedProfile[missmatch*7])) + T = arrayguidesExtendedProfile[missmatch*7+4] / \ + (max(arrayguidesExtendedProfile[missmatch*7])) + DNA = 
arrayguidesExtendedProfile[missmatch*7+5] / \ + (max(arrayguidesExtendedProfile[missmatch*7])) + RNA = arrayguidesExtendedProfile[missmatch*7+6] / \ + (max(arrayguidesExtendedProfile[missmatch*7])) + + # the x locations for the groups + ind = np.arange(0, len(string), 1) + 0.15 + width = 0.7 # the width of the bars: can also be len(x) sequence + + motif = plt.subplot(2, 1, 2, frameon=False) + # motif=plt.subplot(1,1,1) + p1 = plt.bar(ind, A, width, color='red', align='edge') + p2 = plt.bar(ind, C, width, color='blue', bottom=A, align='edge') + p3 = plt.bar(ind, G, width, color='green', bottom=A+C, align='edge') + p4 = plt.bar(ind, T, width, color='purple', bottom=C+G+A, align='edge') + p5 = plt.bar(ind, DNA, width, color='magenta', bottom=C+G+A+T, align='edge') + p6 = plt.bar(ind, RNA, width, color='gold', bottom=C+G+A+T+DNA, align='edge') + plt.xlim(0, len(string)) + plt.xticks([]) + + plt.legend((p1[0], p2[0], p3[0], p4[0], p5[0], p6[0]), + ('A', 'C', 'G', 'T', 'D', 'R'), fontsize=18) + + table = plt.table(cellText=strArray, loc='bottom', + cellLoc='center', rowLoc='bottom') + table.auto_set_font_size(False) + table.set_fontsize(18) + table.scale(1, 1.6) + table.xticks = ([]) + table.yticks = ([]) + + plt.suptitle(str(missmatch)+" Mismatches", + horizontalalignment='center', color='black', size=25) + + plt.tight_layout() + plt.subplots_adjust(top=0.90, bottom=0.07, left=0.05, + right=0.99, wspace=0.12) + + plt.savefig("summary_single_guide_" + str(guide) + + "_"+str(uppermm) + "mm" + ".pdf", format="pdf") + plt.savefig("summary_single_guide_" + str(guide) + + "_"+str(uppermm) + "mm" + ".png", format="png") + # plt.show() + + else: #mms value is range (Eg0-5) + + def make_spider(row, title, count): + data_for_df = {'group': ['A'], 'General': [np.argmin(arrayProfiledistance[:, uppermm])/np.size(arrayexonsMissmatchGlobal[0], 0)]} + for pos, el in enumerate(array_distances): + data_for_df[count_files[pos][0]] = [np.argmin(el[:, 
uppermm])/np.size(arrayexonsMissmatchGlobal[0], 0) ] + # df = pd.DataFrame({ + # 'group': ['A'], + # 'Exons': [np.argmin(arrayExonsFdistance[:, row])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'General': [np.argmin(arrayProfiledistance[:, row])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'Introns': [np.argmin(arrayIntronsdistance[:, row])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'Promoters': [np.argmin(arrayPromotersdistance[:, row])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'DNAse': [np.argmin(arrayDNAsedistance[:, row])/np.size(arrayCTCFMissmatchGlobal, 0)], + # 'CTCF': [np.argmin(arrayCTCFdistance[:, row])/np.size(arrayCTCFMissmatchGlobal, 0)] + # }) + df = pd.DataFrame(data_for_df) + + # number of variable + categories = list(df)[1:] + N = len(categories) + + # What will be the angle of each axis in the plot? (we divide the plot / number of variable) + angles = [n / float(N) * 2 * pi for n in range(N)] + angles += angles[:1] + + # Initialise the spider plot + ax = plt.subplot(2, (uppermm-lowermm)+1, count, polar=True) + # table=plt.table(cellText=data,colLabels=string,loc='bottom') + + # If you want the first axis to be on top: + ax.set_theta_offset(pi / 2) + ax.set_theta_direction(-1) + + # Draw one axe per variable + add labels labels yet + multi_xticks = ['General (' + str(arrayprofileMissmatch[row])+')'] + + for pos, f in enumerate(arrayexonsMissmatch): + multi_xticks.append(count_files[pos][0] + ' (' + str(f[0][row]) + ')') + #multi_xticks.extend([count_files[pos][0] + ' (' + str(max(f[0][row])) + ')' for pos, f in enumerate(arrayexonsMissmatch)]) + # plt.xticks(angles[:-1], ['CTCF'+' ('+str(arrayCTCFMissmatch[row])+')', 'DNAse'+' ('+str(arrayDNAseMissmatch[row])+')', 'Exons'+' ('+str(arrayexonsMissmatch[row])+')', 'General' + + # ' ('+str(arrayprofileMissmatch[row])+')', 'Introns'+' ('+str(arrayintronsMissmatch[row])+')', 'Promoters'+' ('+str(arraypromotersMissmatch[row])+')'], color='black') + plt.xticks(angles[:-1], multi_xticks, color='black') + for 
label, rot in zip(ax.get_xticklabels(), angles): + if (rot == 0): + label.set_horizontalalignment("center") + if (rot > 0): + label.set_horizontalalignment("left") + if (rot > 3): + label.set_horizontalalignment("center") + if (rot > 4): + label.set_horizontalalignment("right") + + # Draw ylabels + ax.set_rlabel_position(0) + plt.yticks([0, 0.25, 0.50, 0.75], [ + "0", "0.25", "0.50", "0.75"], color="black") + plt.ylim(0, 1) + + # Ind1 + values = df.loc[0].drop('group').values.flatten().tolist() + values += values[:1] + ax.plot(angles, values, linewidth=2, linestyle='solid') + ax.fill(angles, values, alpha=0.4) + + + texts = ['General: ' + str(max(arrayprofileMissmatchGlobal[:, row]))] + texts.extend([count_files[pos][0] + ': ' + str(max(f[:,row])) for pos, f in enumerate(arrayexonsMissmatchGlobal)]) + # texts = ["CTCF:"+' '+str(max(arrayCTCFMissmatchGlobal[:, row])), "DNAse:"+' '+str(max(arrayDNAseMissmatchGlobal[:, row])), "Exons:"+' '+str(max(arrayexonsMissmatchGlobal[:, row])), "General:" + + # ' '+str(max(arrayprofileMissmatchGlobal[:, row])), "Introns:"+' '+str(max(arrayintronsMissmatchGlobal[:, row])), "Promoters:"+' '+str(max(arraypromotersMissmatchGlobal[:, row]))] + + colors = [] + for i in texts: + colors.append('white') + #colors = ["white", "white", "white", "white", "white", "white"] + patches = [mpatches.Patch(color=colors[i], label="{:s}".format( + texts[i])) for i in range(len(texts))] + plt.legend(handles=patches, loc=(-0.65, 0.86), labelspacing=0.1, + ncol=1, handlelength=0, handletextpad=0, title="Max Value") + + # Add a title + plt.title(title, y=1.2) + + def make_motif(row, count): + + datacount = arrayguidesExtendedProfile[row*7] / \ + (max(arrayguidesExtendedProfile[row*7])) + data = np.array(datacount, dtype=float) + data = np.around(data, decimals=1) + data.shape = (1, len(datacount)) + + string = guide[0:20] + + A = arrayguidesExtendedProfile[row*7+1] / \ + (max(arrayguidesExtendedProfile[row*7])) + C = arrayguidesExtendedProfile[row*7+2] 
/ \ + (max(arrayguidesExtendedProfile[row*7])) + G = arrayguidesExtendedProfile[row*7+3] / \ + (max(arrayguidesExtendedProfile[row*7])) + T = arrayguidesExtendedProfile[row*7+4] / \ + (max(arrayguidesExtendedProfile[row*7])) + + # the x locations for the groups + ind = np.arange(0, len(string), 1) + 0.15 + width = 0.7 # the width of the bars: can also be len(x) sequence + + plt.subplot(2, (uppermm-lowermm)+1, count+(uppermm-lowermm)+1) + + p1 = plt.bar(ind, A, width, color='#d62728', align='edge') + p2 = plt.bar(ind, C, width, bottom=A, align='edge') + p3 = plt.bar(ind, G, width, bottom=A+C, align='edge') + p4 = plt.bar(ind, T, width, bottom=C+G+A, align='edge') + + plt.legend((p1[0], p2[0], p3[0], p4[0]), ('A', 'C', 'G', 'T')) + + plt.xlim(0, len(string)) + plt.xticks([]) + table = plt.table(cellText=data, colLabels=string, + loc='bottom', cellLoc='center') + # plt.xticks(ind) + # table.set_fontsize(20)) + table.auto_set_font_size(False) + table.set_fontsize(14) + table.scale(1, 1.4) + + # ------- PART 2: Apply to all individuals + # initialize the figure + my_dpi = 96 + plt.figure(figsize=(5000/my_dpi, 5000/my_dpi), dpi=my_dpi) + + # Create a color palette: + # my_palette = plt.cm.get_cmap("Set2",(uppermm-lowermm)+1) + + count = 1 + # Loop to plot + for row in range(lowermm, uppermm+1): + make_spider(row=row, title=str(row) + ' Mismatches', count=count) + make_motif(row=row, count=count) + # plt.subplot(3,4,count*2) + count = count+1 + + # plt.tight_layout() + #plt.subplots_adjust(top=0.85, bottom=0.05, left=0.05,right=0.99, wspace=0.1) + + plt.savefig("summary_multiple_guides_" + str(guide) + "_" + + str(lowermm) + "-" + str(uppermm) + "mm" + ".pdf", format="pdf") + plt.savefig("summary_multiple_guides_" + str(guide) + "_" + + str(lowermm) + "-" + str(uppermm) + "mm" + ".png", format="png") + # plt.show() diff --git a/OldScripts/only_cluster.py b/OldScripts/only_cluster.py new file mode 100644 index 0000000..e9e7622 --- /dev/null +++ b/OldScripts/only_cluster.py 
@@ -0,0 +1,83 @@ +#Script to cluster. Add columns total and cluster pos only if sys2 is false + +#sys1 è semicommon file +#sys2 is True if total and cluster pos are already in the file, else is False +#Output column (not written): Bulge_type, Guide, Target, chr, pos, pos_cluster, direction, mms, bulge, total +import pandas as pd +import time +import sys + +start = time.time() +total_targets = [] +guides_dict = dict() +result_name = sys.argv[1][:sys.argv[1].rfind('.')] + '.cluster.txt' +cluster_pos = False +if sys.argv[2] == 'True': + cluster_pos = True + +with open (sys.argv[1]) as targets: + for line in targets: + if '#' in line: + continue + line = line.strip().split('\t') + if not cluster_pos: + line.append(str(int(line[6]) + int(line[7]))) + if line[5] == '+': + if line[0] == 'DNA': + # line.append(str(int(line[4]) + int(line[7]))) + line.insert(5, str(int(line[4]) + int(line[7]))) + else: + # line.append(str(int(line[4]) - int(line[7]))) + line.insert(5,str(int(line[4]) - int(line[7]))) + else: + # line.append(line[4]) + line.insert(5, line[4]) + try: + guides_dict[line[1].replace('-','')].append(line) + except: + guides_dict[line[1].replace('-','')] = [line] + #total_targets.append(line) + +print('Created set of targets for all guides:', time.time() - start) +start = time.time() +# total_targets.sort(key = lambda x: ( x[3] , int(x[-1]) )) +for k in guides_dict.keys(): + guides_dict[k].sort(key = lambda x: ( x[3] , int(x[5]) )) +#total_targets.sort(key = lambda x: ( x[3] , int(x[5]) )) + +print('Targets sorted:', time.time() - start) + +print('Start clustering') +start_time = time.time() + +with open(result_name, 'w+') as result: + total_targets = [] + for k in guides_dict.keys(): + total_targets += guides_dict[k] + total_list = [] + + first_line = total_targets[0] + # current_chr_pos = first_line[3] + ' ' + first_line[9] + current_chr_pos = first_line[3] + ' ' + first_line[5] + + total_list.append([first_line]) + + for line in total_targets[1:]: + #if line[3] 
+ ' ' + line[9] != current_chr_pos: + if line[3] + ' ' + line[5] != current_chr_pos: + # total_list[-1].sort(key = lambda x: int(x[8])) + total_list[-1].sort(key = lambda x: int(x[9])) + total_list.append([line]) + # current_chr_pos = line[3] + ' ' + line[9] + current_chr_pos = line[3] + ' ' + line[5] + else: + total_list[-1].append(line) + + total_list[-1].sort(key = lambda x: int(x[9])) + + total_list.sort(key = lambda x: int(x[0][9])) + for cluster in total_list: + for target in cluster: + result.write('\t'.join(target) + '\n') + +print("Clustering runtime: %s seconds" % (time.time() - start_time)) \ No newline at end of file diff --git a/OldScripts/pam_analysis.py b/OldScripts/pam_analysis.py new file mode 100644 index 0000000..eb8cf2d --- /dev/null +++ b/OldScripts/pam_analysis.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python + +#Was called min_max.py +#Calculates, for the semicommon file, the total column, the min max mismatches column, the pam disr column, pam creation (with 'n' value), var uniq (with 'n' value) +#sys 1 is semicommon file +#sys 2 is pam file NOTE not tested with different from NRG + +# If a iupac char is lowercase, it means that every real value it corresponds is a mismatch, meaning that changing it with a real char does +# not add a new theoretical mismatch (eg: +# guide: AATCCTAG... +# target: AArCCTAG.. +# targetA: AAaCCTAG... +# targetG: AAgCCTAG... +# ) +# If a iupac char is uppercase, it means that at least one of the corresponding real char is a mismatch (NOTE true only if the guide has no iupac +# char), meaning that there is a theoretical new mismatch (eg: +# guide: AATCCTAG... +# target: AAYCCTAG.. +# targetC: AAcCCTAG... > new mismatch +# targetT: AATCCTAG... 
+# ) +# TODO nel caso della pam, se ho uno iup nel targ e uno nella guida, prendo quello che non rimane nell'intersezione dei loro valori nel dict +# se ho iup nel tar e normale nella guida, prendo i valori diversi dalla guida nei valori del dict (NOTE faccio set(target) - set(guida)) +# eg NRG +# NMG -> segnalo C +# NGG +# NBG -> segnalo C,T + +#NOTE 06/03 -> removed PAM Disruption calculation +import sys +import itertools +#argv 1 is cluster file +#argv 2 is pam file +#argv 3 is type of genome: var or both +fill_column = True +if sys.argv[3] == 'var': + fill_column = False +with open (sys.argv[2]) as pam: + line = pam.read().strip() + pam = line.split(' ')[0] + len_pam = int(line.split(' ')[1]) + pos_beg = 0 + pos_end = None + pam_begin = 0 + pam_end = len_pam * (-1) + if len_pam < 0: + pam = pam[: (len_pam * (-1))] + len_pam = len_pam * (-1) + pos_beg = len_pam + pos_end = None + pam_begin = 0 + pam_end = len_pam + else: + pam = pam[(len_pam * (-1)):] + pos_beg = 0 + pos_end = len_pam * (-1) + pam_begin = len_pam * (-1) + pam_end = None + +iupac_code = { + "R":("A", "G"), + "Y":("C", "T"), + "S":("G", "C"), + "W":("A", "T"), + "K":("G", "T"), + "M":("A", "C"), + "B":("C", "G", "T"), + "D":("A", "G", "T"), + "H":("A", "C", "T"), + "V":("A", "C", "G"), + "r":("A", "G"), + "y":("C", "T"), + "s":("G", "C"), + "w":("A", "T"), + "k":("G", "T"), + "m":("A", "C"), + "b":("C", "G", "T"), + "d":("A", "G", "T"), + "h":("A", "C", "T"), + "v":("A", "C", "G"), + } +iupac_code_set = { + "R":{"A", "G"}, + "Y":{"C", "T"}, + "S":{"G", "C"}, + "W":{"A", "T"}, + "K":{"G", "T"}, + "M":{"A", "C"}, + "B":{"C", "G", "T"}, + "D":{"A", "G", "T"}, + "H":{"A", "C", "T"}, + "V":{"A", "C", "G"}, + "r":{"A", "G"}, + "y":{"C", "T"}, + "s":{"G", "C"}, + "w":{"A", "T"}, + "k":{"G", "T"}, + "m":{"A", "C"}, + "b":{"C", "G", "T"}, + "d":{"A", "G", "T"}, + "h":{"A", "C", "T"}, + "v":{"A", "C", "G"}, + "A":{"A"}, + "T":{"T"}, + "C":{"C"}, + "G":{"G"}, + "a":{"a"}, + "t":{"t"}, + "c":{"c"}, + 
"g":{"g"}, + 'N':{'A','T','G','C'} + } + +name_output = sys.argv[1][:sys.argv[1].rfind('.')] +with open (sys.argv[1]) as t, open(name_output + '.minmaxdisr.txt','w+') as result: + header = '#Bulge type\tcrRNA\tDNA\tChromosome\tPosition\tCluster Position\tDirection\tMismatches\tBulge Size\tTotal\tMin_mismatches\tMax_mismatches' + if fill_column: + header += '\tPAM_gen\tVar_uniq' + header += '\n' + result.write(header) + next(t) #Skip header + for line in t: + found_iupac = False + found_iupac_pam = False + line = line.strip().split('\t') + max_mm = int(line[7]) + + for char in line[2][pos_beg: pos_end]: + if char in iupac_code: + found_iupac = True + if char.isupper(): + max_mm = max_mm + 1 + + #Pam disruption + pam_disr = [] + for pos, char in enumerate(line[2][pam_begin:pam_end]): + if char in iupac_code: + + diff_res = iupac_code_set[char] - iupac_code_set[pam[pos]] + if diff_res: + pam_disr.append(diff_res) + found_iupac_pam = True + else: + pam_disr.append(iupac_code_set[char]) + else: + pam_disr.append(char) + + #Add min, max mismatches + if found_iupac: + line.append(line[7]) + line.append(str(max_mm)) + else: + line.append('-') + line.append('-') + + #Add PAM Disruption #06/03 Removed + # if (found_iupac_pam): + # pam_disr_list = [] + # for p in itertools.product(*pam_disr): + # pam_disr_list.append(''.join(p)) + # line.append(','.join(pam_disr_list)) + # else: + # line.append('n') + + #Pam creation + if fill_column: + line.append('n') + + #Uniq var + line.append('n') + + result.write('\t'.join(line) + '\n') + \ No newline at end of file diff --git a/OldScripts/pam_creation.py b/OldScripts/pam_creation.py new file mode 100644 index 0000000..a69f112 --- /dev/null +++ b/OldScripts/pam_creation.py @@ -0,0 +1,253 @@ +# sys argv 1 is input file (uniq.sorted.txt) +# sys argv 2 is pam NOTE tested only with NRG +# sys argv 3 is reference genome dir +# First scan of file: when IUPAC in pam, save position into bedfile +# Use bedtools getfasta to extract all ref 
sequences +# Second scan of file: add column with AGG,CGG; +# if from C to ref (and it was not a pam), we got S in enr (created new pam). +# Not sure if calling bedtools for each iupac pam found + single scan of file is faster than single bedtools call + 2 scan of file + +#06/03 PAM Disruption removed + +import sys +import subprocess +import os +from os import listdir #for getting directories +from os.path import isfile, isdir,join #for getting directories +import itertools +import time +pam = sys.argv[2].strip().upper() +guide_len = 20 +pam_at_beginning = False +genome_dir = os.path.realpath(sys.argv[3]) +with open (sys.argv[2]) as pam: + line = pam.read().strip() + pam = line.split(' ')[0] + len_pam = int(line.split(' ')[1]) + guide_len = len(pam) - len_pam + pos_beg = 0 + pos_end = None + pam_begin = 0 + pam_end = len_pam * (-1) + if len_pam < 0: + guide_len = len(pam) + len_pam + pam = pam[: (len_pam * (-1))] + len_pam = len_pam * (-1) + pos_beg = len_pam + pos_end = None + pam_begin = 0 + pam_end = len_pam + pam_at_beginning = True + else: + pam = pam[(len_pam * (-1)):] + pos_beg = 0 + pos_end = len_pam * (-1) + pam_begin = len_pam * (-1) + pam_end = None + +print(guide_len) +name_output = sys.argv[1][:sys.argv[1].rfind('.')] + +iupac_code = { + "R":("A", "G"), + "Y":("C", "T"), + "S":("G", "C"), + "W":("A", "T"), + "K":("G", "T"), + "M":("A", "C"), + "B":("C", "G", "T"), + "D":("A", "G", "T"), + "H":("A", "C", "T"), + "V":("A", "C", "G"), + "r":("A", "G"), + "y":("C", "T"), + "s":("G", "C"), + "w":("A", "T"), + "k":("G", "T"), + "m":("A", "C"), + "b":("C", "G", "T"), + "d":("A", "G", "T"), + "h":("A", "C", "T"), + "v":("A", "C", "G"), + 'N':('A', 'T', 'C', 'G') + } + +iupac_code_set = { + "R":{"A", "G"}, + "Y":{"C", "T"}, + "S":{"G", "C"}, + "W":{"A", "T"}, + "K":{"G", "T"}, + "M":{"A", "C"}, + "B":{"C", "G", "T"}, + "D":{"A", "G", "T"}, + "H":{"A", "C", "T"}, + "V":{"A", "C", "G"}, + "r":{"A", "G"}, + "y":{"C", "T"}, + "s":{"G", "C"}, + "w":{"A", "T"}, + 
"k":{"G", "T"}, + "m":{"A", "C"}, + "b":{"C", "G", "T"}, + "d":{"A", "G", "T"}, + "h":{"A", "C", "T"}, + "v":{"A", "C", "G"}, + "A":{"A"}, + "T":{"T"}, + "C":{"C"}, + "G":{"G"}, + "a":{"a"}, + "t":{"t"}, + "c":{"c"}, + "g":{"g"}, + 'N':{'A','T','G','C'} + } +rev_comp_pam = dict() +for i in range(len_pam): + rev_comp_pam[i] = len_pam - i - 1 +# rev_comp_pam = { +# 0 : 2, +# 1: 1, +# 2: 0 +# } + +def rev_comp(a): + if a == 'A' or a == 'a': + return 'T' + if a == 'T' or a == 't': + return 'A' + if a == 'C' or a == 'c': + return 'G' + return 'C' +start_time = time.time() + +#NOTE metto solo i '-' in min max mismatch e la y in var uniq + +with open(sys.argv[1]) as uniq, open(name_output + '.pamcreation.txt', 'w+') as res: + for line in uniq: + if '#' in line: + continue + line = line.strip().split('\t') + + #Min + line.append('-') + #max + line.append('-') + #PAM dis #06/03 REMOVED + # line.append('n') + #Pam create + + line.append('n') + #Var uniq + line.append('y') + + res.write('\t'.join(line) + '\n') + +exit() + + + + +#NOTE inserito pam creation in annotator + +#Bedtools estrae da start(compreso) a end(non compreso) +if not pam_at_beginning: + with open (sys.argv[1]) as uniq, open('bedfile.bed', 'w+') as bedfile: + #header=uniq.readline() #NOTE uncomment if file has header + for line in uniq: + if '#' in line: + continue + line = line.strip().split('\t') + for pos, char in enumerate(line[2][pam_begin:pam_end]): + if char in iupac_code: + if line[0] == 'DNA': + bulges = int(line[8]) + elif line[0] == 'RNA': + bulges = int(line[8]) * (-1) + else: + bulges = 0 + if line[6] == '+': + # La pos (line[4] mi indica la prima lettera del target, eg la prima A in ACGCGACTAGCTACGCACGTNRG) + bedfile.write(line[3] + '\t' + str( int(line[4]) + guide_len + bulges + pos) + '\t' + str(int(line[4]) + guide_len + bulges + pos + 1) +'\n') + else: + # La pos (line[4] mi indica la prima lettera del target, che sarebbe la prima lettera della pam rev compl, + # eg la prima C in 
CCGTGCATACTAGCTACGCACGT) + bedfile.write(line[3] + '\t' + str(int(line[4]) + rev_comp_pam[pos]) + '\t' + str(int(line[4]) + rev_comp_pam[pos] + 1) +'\n') +else: + with open (sys.argv[1]) as uniq, open('bedfile.bed', 'w+') as bedfile: + #header=uniq.readline() #NOTE uncomment if file has header + for line in uniq: + if '#' in line: + continue + line = line.strip().split('\t') + for pos, char in enumerate(line[2][pam_begin:pam_end]): + if char in iupac_code: + if line[0] == 'DNA': + bulges = int(line[8]) + elif line[0] == 'RNA': + bulges = int(line[8]) * (-1) + else: + bulges = 0 + if line[6] == '+': + #SEE file pamatbeginning.txt + bedfile.write(line[3] + '\t' + str( int(line[4]) + pos) + '\t' + str(int(line[4]) + pos + 1) +'\n') + else: + # #SEE file pamatbeginning.txt + bedfile.write(line[3] + '\t' + str(int(line[4]) + guide_len + bulges + rev_comp_pam[pos]) + '\t' + str(int(line[4]) + guide_len + bulges + rev_comp_pam[pos] + 1) +'\n') + +print('Created bedfile:', time.time() - start_time) +start_time = time.time() +bed_file = os.path.realpath('bedfile.bed') +subprocess.call(['../../PostProcess/./ext_seq_pam_creation.sh ' + genome_dir + ' ' + bed_file], shell = True) +print('Bedtools DONE:', time.time() - start_time) +onlyfile = [f for f in listdir('tmp_seq') if isfile(join('tmp_seq', f))] +start_time = time.time() +open_file = dict() + +for i in onlyfile: + open_file[i.split('.fa')[0]] = open('tmp_seq/' + i) + +with open(sys.argv[1]) as uniq, open(name_output + '.pamcreation.txt', 'w+') as res: + for line in uniq: + if '#' in line: + continue + line = line.strip().split('\t') + file_corresponding = line[3] + total_line = [] + found_creation = False + for pos, char in enumerate(line[2][pam_begin:pam_end]): + if char in iupac_code: + ref_char = open_file[file_corresponding].readline().strip().upper() + if line[6] == '-': + ref_char = rev_comp(ref_char) + + char_to_write = iupac_code_set[pam[pos]] & iupac_code_set[ref_char] + + if not char_to_write: + found_creation 
= True + char_to_write = iupac_code_set[pam[pos]] & iupac_code_set[char] + total_line.append(char_to_write) + else: + total_line.append(char) + + #Min + line.append('-') + #max + line.append('-') + #PAM dis #06/03 REMOVED + # line.append('n') + #Pam create + if found_creation: + list_pam = [] + for el in itertools.product(*total_line): + list_pam.append(''.join(el)) + line.append(','.join(list_pam)) + else: + line.append('n') + #Var uniq + line.append('y') + + res.write('\t'.join(line) + '\n') + +print('Done', time.time() - start_time) diff --git a/OldScripts/reassign_sample_to_cluster.py b/OldScripts/reassign_sample_to_cluster.py new file mode 100644 index 0000000..300a4d5 --- /dev/null +++ b/OldScripts/reassign_sample_to_cluster.py @@ -0,0 +1,40 @@ +''' +Script reads the top_1.sample file, for each cluster create a entry in dictionary +guide,chr,clusterpos -> union of all samples in this cluster +The opens the cluster targets file and for each top1 of the cluster assign the corresponding +dictionary entry. 
For the other targets in the cluster writes 'n' -> TODO pensare a cosa scrivere +''' + +#TODO migliorarlo per eventuale file top1_sample con dimensioni molto grandi +# sys1 is targets.cluster file +# sys2 is top1.samples file +# sys3 is job_id +import sys + +sample_dict = dict() +current_pos = '0' +with open(sys.argv[2], 'r') as top_samples: + for line in top_samples: + if '#' in line: + continue + line = line.strip().split('\t') + try: + sample_dict[line[1].replace('-','') + line[3] + line[5]] = sample_dict[line[1].replace('-','') + line[3] + line[5]].union(set(line[-2].split(','))) + except: + sample_dict[line[1].replace('-','') + line[3] + line[5]] = set(line[-2].split(',')) + +with open(sys.argv[1]) as targets, open(sys.argv[3] + '.final.txt', 'w+') as result: + for line in targets: + if '#' in line: + continue + line = line.strip().split('\t') + if current_pos != line[1].replace('-','') + line[3] + line[5]: + try: + line.append(','.join(sample_dict[line[1].replace('-','') + line[3] + line[5]])) + except: + line.append('n') + current_pos = line[1].replace('-','') + line[3] + line[5] + else: + line.append('n') + result.write('\t'.join(line) + '\t' + line[1].replace('-','') + '\n') #line[1].replace('-','') added to do better grep + diff --git a/OldScripts/scores_guide_table.old.py b/OldScripts/scores_guide_table.old.py new file mode 100644 index 0000000..c3d95e6 --- /dev/null +++ b/OldScripts/scores_guide_table.old.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python + + +# Script that calculates cfd score for targets with bulge 'X', and save the accumulated cfd score for the input guide +# Also calculates the Doench score if the targets has bulge 'X' and 0 mms (doench = 0 if no such target exists) + +#argv 1 = target file +#argv 2 is genome_directory (eg ../../Genomes/hg19/) +#argv 3 is pam file -> to check if len is 23 and pam is NGG +#argv 4 is guide file +import time +import pickle +import re +import sys +import os +import numpy as np +import subprocess +import 
azimuth.model_comparison +import string +import itertools + +# doench_string.append(seq) +# doench_score = azimuth.model_comparison.predict(np.asarray(doench_string), None, None, model= model, pam_audit=False) +# doench_score = np.around(doench_score * 100) +def doenchForIupac(sequence_doench, model): + pos_iupac = [] + var = [] + for pos, c in enumerate(sequence_doench): + if c in iupac_code: + pos_iupac.append(pos) + var.append(iupac_code[c]) + + target_combination = [] + if var: + for i in itertools.product(*var): + t = list(sequence_doench) + for p, el in enumerate(pos_iupac): + t[el] = i[p] + target_combination.append(''.join(t)) + else: + target_combination.append(sequence_doench) + + doench_score = azimuth.model_comparison.predict(np.asarray(target_combination), None, None, model= model, pam_audit=False) + doench_score = [np.around(i * 100) for i in doench_score] + return int(max(doench_score)) + + +def get_mm_pam_scores(): + try: + mm_scores = pickle.load(open(os.path.dirname(os.path.realpath(__file__)) + '/mismatch_score.pkl', 'rb')) + pam_scores = pickle.load(open(os.path.dirname(os.path.realpath(__file__)) +'/PAM_scores.pkl', 'rb')) + return (mm_scores, pam_scores) + except: + raise Exception("Could not find file with mismatch scores or PAM scores") + + +def revcom(s): + basecomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A'} + letters = list(s[::-1]) + letters = [basecomp[base] for base in letters] + return ''.join(letters) + + +# Calculates CFD score +def calc_cfd(guide_seq, sg, pam, mm_scores, pam_scores): + + score = 1 + dna_gp = 0 + sg = sg.replace('T', 'U') + guide_seq = guide_seq.replace('T', 'U') + s_list = list(sg) + guide_seq_list = list(guide_seq) + for i, sl in enumerate(s_list): + + if guide_seq_list[i] == sl: + + score *= 1 + + else: + key = 'r' + guide_seq_list[i] + ':d' + revcom(sl) + ',' + str(i + 1) + score *= mm_scores[key] + if '-' in guide_seq_list[i]: + dna_gp = dna_gp + 1 + + score *= pam_scores[pam] + + return score + +tab = 
str.maketrans("ACTG", "TGAC") + +def reverse_complement_table(seq): + return seq.translate(tab)[::-1] +#if __name__ == '__main__': + +with open(sys.argv[3]) as pamfile: + line = pamfile.readline().strip().split(' ') + if len(line[0]) != 23 or 'NGG' not in line[0]: + with open('acfd.txt', 'w+') as result: + result.write('NO SCORES') + exit() + +mm_scores, pam_scores = get_mm_pam_scores() +guides_dict = dict() +guides_dict_doench = dict() + +iupac_code = { + "R":("A", "G"), + "Y":("C", "T"), + "S":("G", "C"), + "W":("A", "T"), + "K":("G", "T"), + "M":("A", "C"), + "B":("C", "G", "T"), + "D":("A", "G", "T"), + "H":("A", "C", "T"), + "V":("A", "C", "G"), + "r":("A", "G"), + "y":("C", "T"), + "s":("G", "C"), + "w":("A", "T"), + "k":("G", "T"), + "m":("A", "C"), + "b":("C", "G", "T"), + "d":("A", "G", "T"), + "h":("A", "C", "T"), + "v":("A", "C", "G"), + } + +start = time.time() + +enr = sys.argv[2].split('/') +enr_str = '' +if enr[-1]: + if'+' in enr[-1]: + enr_str = '.enriched' +else: + if'+' in enr[-2]: + enr_str = '.enriched' +with open( os.path.dirname(os.path.realpath(__file__)) + "/azimuth/saved_models/V3_model_nopos.pickle", 'rb') as f: + model = pickle.load(f) +max_doench = 0 +n_of_acceptable_cfd = 0 +sum_cfd = 0 +cfd_scores = [] + +all_word = [] +with open (sys.argv[1]) as result: + + #Calc CDF score + for target in result: + target = target.strip().split('\t') + if 'X' not in target[0]: + continue + + guide_seq = target[1] + off = target[2].upper() + m_guide_seq = re.search('[^ATCGN-]', guide_seq) + m_off = re.search('[^ATCG-]', off) + iup_off = [] + first = True + start_iup_off = 1 + + if (m_guide_seq is None) and (m_off is None): + + #Calc CFD + + + pam = off[-2:] + sg = off[:-3] + #print("off. 
", off) + #print ("sg: ", sg) + #print ("guide_seq: ", guide_seq) + + cfd_score = calc_cfd(guide_seq, sg, pam, mm_scores, pam_scores) + if (target[7] == '0'): #TODO se cambio inserendo pos cluister, devo cambiareanche qui, da 6 a 7 (con colonna pos cluster) + #estraggo sequenza + with open('bedfile_tmp.bed', 'w+') as bedfile: + if target[5] == '+': + bedfile.write(target[3] + '\t' + str(int(target[4]) - 4 ) + '\t' + str(int(target[4]) + 23 + 3 )) + else: + bedfile.write(target[3] + '\t' + str(int(target[4]) - 3 ) + '\t' + str(int(target[4]) + 23 + 4 )) + + extr = subprocess.Popen(['bedtools getfasta -fi ' + sys.argv[2] + '/' + target[3] + enr_str +'.fa' ' -bed bedfile_tmp.bed'], shell = True, stdout=subprocess.PIPE) #TODO insert option for .fasta + extr.wait() + out, err = extr.communicate() + out = out.decode('UTF-8') + if target[5] == '+': + sequence_doench = out.strip().split('\n')[-1].upper() + else: + sequence_doench = reverse_complement_table(out.strip().split('\n')[-1].upper()) + # doench_score = azimuth.model_comparison.predict(np.asarray(sequence_doench), None, None, model= model, pam_audit=False) + # doench_score = np.around(doench_score * 100) + # doench_score = doench_score[0] + doench_score = doenchForIupac(sequence_doench, model) + try: + if doench_score > guides_dict_doench[target[1]]: + guides_dict_doench[target[1]] = doench_score + except: + guides_dict_doench[target[1]] = doench_score + + sum_cfd = sum_cfd + cfd_score + try: + guides_dict[target[1]] = guides_dict[target[1]] + cfd_score + except: + guides_dict[target[1]] = cfd_score + if cfd_score > 0.023: + n_of_acceptable_cfd = n_of_acceptable_cfd +1 + + else: + if "N" in off: + continue + if (target[7] == '0'): #NOTE change from 6 to 7 if input file has cluster position column + with open('bedfile_tmp.bed', 'w+') as bedfile: + if target[5] == '+': + bedfile.write(target[3] + '\t' + str(int(target[4]) - 4 ) + '\t' + str(int(target[4]) + 23 + 3 )) + else: + bedfile.write(target[3] + '\t' + 
str(int(target[4]) - 3 ) + '\t' + str(int(target[4]) + 23 + 4 )) + + extr = subprocess.Popen(['bedtools getfasta -fi ' + sys.argv[2] + '/' + target[3] + enr_str +'.fa' ' -bed bedfile_tmp.bed'], shell = True, stdout=subprocess.PIPE) #TODO insert option for .fasta + extr.wait() + out, err = extr.communicate() + out = out.decode('UTF-8') + if target[5] == '+': + sequence_doench = out.strip().split('\n')[-1].upper() + else: + sequence_doench = reverse_complement_table(out.strip().split('\n')[-1].upper()) + # pos_iupac = [] + # var = [] + # for pos, c in enumerate(sequence_doench): + # if c in iupac_code: + # pos_iupac.append(pos) + # var.append(iupac_code[c]) + + # target_combination = [] + # for i in itertools.product(*var): + # t = list(sequence_doench) + # for p, el in enumerate(pos_iupac): + # t[el] = i[p] + # target_combination.append(''.join(t)) + + # doench_score = azimuth.model_comparison.predict(np.asarray(target_combination), None, None, model= model, pam_audit=False) + # doench_score = [np.around(i * 100) for i in doench_score] + m_doench = doenchForIupac(sequence_doench, model) + try: + if m_doench > guides_dict_doench[target[1]]: + guides_dict_doench[target[1]] = m_doench + except: + guides_dict_doench[target[1]] = m_doench + + i = 0 + for char in off: + if char in iupac_code: + n = len(iup_off) + for list_char in iupac_code[char]: + if not first: + for test in range(n - start_iup_off, n): + iup_off.append(iup_off[test][:i] + list_char + iup_off[test][i+1:]) + + + else: + iup_off.append(off[:i] + list_char + off[i+1:]) + first = False + start_iup_off = start_iup_off * len(iupac_code[char]) + + i += 1 + dna_gap_removal = True + for no_iup_str in range(len(iup_off) - start_iup_off, len(iup_off)): + + + no_iup_gap_srt = iup_off[no_iup_str] #se non ci sono gap passo la stringa non modificata al calc_cfd + + #Calc CFD + + + pam = no_iup_gap_srt[-2:] + sg = no_iup_gap_srt[:-3] + + cfd_score = calc_cfd(guide_seq, sg, pam, mm_scores, pam_scores) + sum_cfd = 
sum_cfd + cfd_score + try: + guides_dict[target[1]] = guides_dict[target[1]] + cfd_score + except: + guides_dict[target[1]] = cfd_score + +job_id = sys.argv[1].split('/')[-1].split('.')[0] + + + +with open( 'acfd.txt', 'w+') as res, open(sys.argv[4], 'r') as guides: + guides = guides.read().strip().split('\n') + for g in guides: + if g not in guides_dict: + guides_dict[g] = 0 + #for k in guides_dict.keys(): + if g not in guides_dict_doench: + guides_dict_doench[g] = 0 + res.write(g + '\t' + str(guides_dict[g]) + '\t' + str(guides_dict_doench[g]) + '\n') + + +end = time.time() diff --git a/OldScripts/submit_job.sh b/OldScripts/submit_job.sh new file mode 100644 index 0000000..171cc6d --- /dev/null +++ b/OldScripts/submit_job.sh @@ -0,0 +1,300 @@ +#!/bin/sh +#$1 is directory of result for submitted job id (Results/job_id) +#$2 is genome_selected directory Eg Genomes/hg19_ref or Genomes/hg19_ref+1000genomeproject #NOTE + or - to decide +#$3 is genome_ref directory Eg Genomes/hg19_ref +#$4 is genome_idx directory Eg genome_library/NGG_2_hg19_ref+hg19_1000genomeproject or genome_library/NGG_2_hg19_ref +#$5 is pam file Eg Results/72C1MNXDWF/pam.txt +#$6 is guides file Eg Results/LNHM6F3REO/guides.txt (both upload file and custom inserted) +#$7 is mms +#$8 is dna +#$9 is rna +#$10 is search_index +#$11 is search +#$12 is annotation +#$13 is generate report +#$14 is gecko comparison +#$15 is genome_ref comparison +#$16 is genme_idx_ref (for genome_ref comparison if search was done with indices) Eg genome_library/NGG_2_hg19_ref +#$17 is send_email +#$18 is annotation file EG annotations/hg19_ref.annotations.bed +#$19 is genome type, can be 'ref', 'var', 'both' +#Note that if genome_selected is not enriched, the python exe will force $15 as false +jobid=$(basename $1) +echo 'Job\tStart\t'$(date)> $1'/'log.txt +used_genome_dir=$2 + +#Start search index #NOTE new version 2.1.2 of crispritz needed +echo 'Search-index\tStart\t'$(date) >> $1'/'log.txt +echo 'Search_output '${19} 
> $1/output.txt +if [ ${10} = 'True' ]; then + #echo 'crispritz search-index' + crispritz.py search $4 $5 $6 $jobid -index -mm $7 -bDNA $8 -bRNA ${9} -t >> $1/output.txt #TODO sistemare l'output redirection + mv ./$jobid.*.txt $1 + mv ./$jobid.*.xls $1 + + if [ ${15} = 'True' ]; then + mkdir $1'/ref' + echo 'Search_output_ref '${19} >> $1/output.txt + crispritz.py search ${16} $5 $6 $jobid'_ref' -index -mm $7 -bDNA $8 -bRNA ${9} -t >> $1/output.txt #TODO sistemare l'output redirection + mv ./$jobid'_ref'.*.txt $1/ref + mv ./$jobid'_ref'.*.xls $1/ref + fi +fi +echo 'Search-index\tDone\t'$(date) >> $1'/'log.txt + +#Start search +echo 'Search\tStart\t'$(date) >> $1'/'log.txt +if [ ${11} = 'True' ]; then + #echo 'crispritz search' + crispritz.py search $used_genome_dir $5 $6 $jobid -mm $7 -t >> $1/output.txt #-scores $3 + mv ./$jobid.*.txt $1 + mv ./$jobid.*.xls $1 + if [ ${15} = 'True' ]; then + mkdir $1'/ref' + echo 'Search_output_ref '${19} >> $1/output.txt + crispritz.py search $3 $5 $6 $jobid'_ref' -mm $7 -t >> $1/output.txt + mv ./$jobid'_ref'.*.txt $1/ref + mv ./$jobid'_ref'.*.xls $1/ref + fi +fi +echo 'Search\tDone\t'$(date) >> $1'/'log.txt + + +#Start annotation #NOTE new version 2.1.2 of crispritz needed +echo 'Annotation\tStart\t'$(date) >> $1'/'log.txt +echo 'Annotate_output '${19} > $1/output.txt +if [ ${12} = 'True' ]; then + #echo 'crispritz annotate' + if [ ${10} = 'True' ]; then #Indexed search was done, read profile complete + crispritz.py annotate-results $1'/'/$jobid.profile_complete.xls $1'/'$jobid'.targets.txt' ${18} $jobid >> $1/output.txt + fi + if [ ${11} = 'True' ]; then #Normal search was done, read profile + crispritz.py annotate-results $1'/'/$jobid.profile.xls $1'/'$jobid'.targets.txt' ${18} $jobid >> $1/output.txt + fi + mv ./$jobid.Annotation*.txt $1 + if [ ${15} = 'True' ]; then + echo 'Annotate_output_ref '${19} >> $1/output.txt + if [ ${10} = 'True' ]; then + crispritz.py annotate-results $1'/ref/'$jobid'_ref.profile_complete.xls' 
$1'/ref/'$jobid'_ref.targets.txt' ${18} $jobid'_ref' >> $1/output.txt + fi + if [ ${11} = 'True' ]; then + crispritz.py annotate-results $1'/ref/'$jobid'_ref.profile.xls' $1'/ref/'$jobid'_ref.targets.txt' ${18} $jobid'_ref' >> $1/output.txt + fi + mv ./$jobid'_ref'.Annotation*.txt $1/ref + fi +fi +echo 'Annotation\tDone\t'$(date) >> $1'/'log.txt + +#Start generate report +echo 'Report\tStart\t'$(date) >> $1'/'log.txt +#python summary_guide.py $1 $7 #New annotated file are alredy located in right directories +profile_type='profile_complete' +if [ ${11} = 'True' ]; then + profile_type='profile' +fi + +cd $1 +if [ ${13} = 'True' ]; then + echo 'Generate_report' > output.txt + proc=$(($7 + 1)) + while IFS= read -r line || [ -n "$line" ]; do + if [ ${14} = 'True' ]; then #If -gecko + if [ ${15} = 'True' ]; then #If genome_ref comparison + + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -profile $jobid'.'$profile_type'.xls' -extprofile *.extended_profile.xls -annotation $jobid'.Annotation.txt' -sumref ref/$jobid'_ref'.Annotation.txt -gecko + else + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -profile $jobid'.'$profile_type'.xls' -extprofile *.extended_profile.xls -annotation $jobid'.Annotation.txt' -gecko + fi + else + if [ ${15} = 'True' ]; then #If genome_ref comparison + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -profile $jobid'.'$profile_type'.xls' -extprofile *.extended_profile.xls -annotation $jobid'.Annotation.txt' -sumref ref/$jobid'_ref'.Annotation.txt + else + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -profile $jobid'.'$profile_type'.xls' -extprofile *.extended_profile.xls -annotation $jobid'.Annotation.txt' + fi + fi + echo $line >> output.txt + done < guides.txt + +fi + + +cd ../../ + +mkdir assets/Img/$jobid +cp $PWD/$1/*.png assets/Img/$jobid/ +echo 
'Report\tDone\t'$(date) >> $1'/'log.txt + +#Temp (find better way when scores will be modified), sum of cfd per guide +#TODO scores_test will be substituted with -scores option with crispritz 2.1.2 +echo 'PostProcess\tStart\t'$(date) >> $1'/'log.txt +cd $1 +echo 'Post Process start' +echo 'Start Scoring' +python ../../PostProcess/scores_guide_table.py $jobid.targets.txt ../../$used_genome_dir pam.txt guides.txt +echo 'End Scoring' +#Analysis for var/ref type ('both') +if [ ${19} = 'both' ]; then #TODO CHECK FOR LAST COL INDICES + #Estract common, semicommon and unique + echo 'Start creation semicommon, common, unique' + ../../PostProcess/./extraction.sh ref/$jobid'_ref.targets.txt' $jobid.targets.txt $jobid + echo 'End creation semicommon, common, unique' + #Cluster semicommon e uniq -> TODO da sistemare l'ordine dell'analisi + echo 'Start cluster semicommon' + python ../../PostProcess/cluster.dict.py $jobid.semi_common_targets.txt + echo 'End cluster semicommon' + echo 'Start cluster unique' + python ../../PostProcess/cluster.dict.py $jobid.unique_targets.txt + echo 'End cluster unique' + #Pam analysis + echo 'Start pam analysis' + python ../../PostProcess/pam_analysis.py $jobid.semi_common_targets.cluster.txt pam.txt ${19} # > $jobid.semi_common_targets.cluster.minmaxdisr.txt + echo 'End pam analysis' + echo 'Start pam creation' + python ../../PostProcess/pam_creation.py $jobid.unique_targets.cluster.txt pam.txt ../../$3 # > $jobid.unique_targets.cluster.pamcreation.txt + echo 'End pam creation' + cat $jobid.unique_targets.cluster.pamcreation.txt $jobid.semi_common_targets.cluster.minmaxdisr.txt > $jobid.total.txt + #Summary guide, pos + echo 'Start summary by guide and position' + python ../../PostProcess/summary_by_guide_position.py $jobid.total.txt $7 $8 $9 guides.txt $jobid 'Uniq' + echo 'End summary by guide and position' + # mv $jobid.total.txt $jobid.targets.cluster.txt + #Top 1 extraction + echo 'Start top 1 extraction' + python 
../../PostProcess/extract_top.py $jobid.total.txt $jobid # > $jobid.top_1.txt + echo 'End top 1 extraction' + #Top1 expansion + echo 'Start sort' + sort -k4,4 $jobid.top_1.txt > $jobid.top_1.sort.txt && mv $jobid.top_1.sort.txt $jobid.top_1.txt + echo 'End sort' + echo 'Start calc samples' + python ../../PostProcess/calc_samples_faster.py ../../../dictionaries $jobid.top_1.txt #> $jobid.top_1.samples.txt + echo 'End calc samples' + #Summary samples + echo 'Start summary by samples' + python ../../PostProcess/summary_by_samples.py $jobid.top_1.samples.txt $jobid ${19} guides.txt + echo 'End summary by samples' + #Annotazioni per samples, pop, superpop + echo 'Start annotation samples' + python ../../PostProcess/annotation_samples.py $jobid.top_1.samples.txt $jobid.Annotation.targets.txt $jobid.Annotation.txt $jobid + echo 'End annotation samples' + #Rimettere i samples nel file di cluster (solo nel top1) + echo 'Start creating final file' + python ../../PostProcess/reassign_sample_to_cluster.py $jobid.total.txt $jobid.top_1.samples.txt $jobid # > $jobid.final.txt + echo 'End creating final file' + + # #TODO sistemare fare script unico + # python ../../PostProcess/cluster.dict.py ref/$jobid'_ref'.targets.txt #jobid_ref.targets.cluster.txt + # python ../../PostProcess/extract_top.py ref/$jobid'_ref'.targets.cluster.txt $jobid'_ref' # > $jobid_ref.top_1.txt + # python ../../file_per_crispritz/annotator.py ../../${18} $jobid'_ref'.top_1.txt $jobid'_ref' # $jobid'_ref'.Annotation.summary.txt + # python ../../PostProcess/tmp_top1_annotation.py $jobid ./ + # python ../../PostProcess/tmp_top1_annotation.py $jobid'_ref' ./ref/ + # if [ ${13} = 'True' ]; then + # echo 'Generate_report' > output.txt + # proc=$(($7 + 1)) + # while IFS= read -r line || [ -n "$line" ]; do + # if [ ${14} = 'True' ]; then #If -gecko + # if [ ${15} = 'True' ]; then #If genome_ref comparison + + # printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % python ../../PostProcess/radar_chart_new.py $line % 
$jobid'.tmp_res.txt' *.extended_profile.xls $jobid'_ref'.Annotation.summary.txt /home/ubuntu/miniconda3/opt/crispritz/Python_Scripts/Plot/gecko/ + + # else + # printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % python ../../PostProcess/radar_chart_new.py $line % $jobid'.tmp_res.txt' *.extended_profile.xls /home/ubuntu/miniconda3/opt/crispritz/Python_Scripts/Plot/gecko/ + + # fi + # else + # if [ ${15} = 'True' ]; then #If genome_ref comparison + # printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % python ../../PostProcess/radar_chart_new.py $line % $jobid'.tmp_res.txt' *.extended_profile.xls $jobid'_ref'.Annotation.summary.txt no + # else + # printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % python ../../PostProcess/radar_chart_new.py $line % $jobid'.tmp_res.txt' *.extended_profile.xls no + # fi + # fi + # echo $line >> output.txt + # done < guides.txt + # fi +fi + +#Clustering for var and ref +if [ ${19} = 'ref' ]; then + echo 'Start cluster ref' + python ../../PostProcess/cluster.dict.py $jobid.targets.txt 'addGuide' + echo 'End cluster ref' +elif [ ${19} = 'var' ]; then + echo 'Start cluster var' + python ../../PostProcess/cluster.dict.py $jobid.targets.txt + echo 'End cluster var' +fi + + +if [ ${19} = 'ref' ]; then + type_post='No' + python ../../PostProcess/summary_by_guide_position.py $jobid.targets.cluster.txt $7 $8 $9 guides.txt $jobid $type_post +elif [ ${19} = 'var' ]; then + type_post='No' + python ../../PostProcess/summary_by_guide_position.py $jobid.targets.cluster.txt $7 $8 $9 guides.txt $jobid $type_post + + echo 'Start pam analysis' + python ../../PostProcess/pam_analysis.py $jobid.targets.cluster.txt pam.txt ${19} + echo 'End pam analysis' + # Extract top 1 + echo 'Start extract top1' + python ../../PostProcess/extract_top.py $jobid.targets.cluster.minmaxdisr.txt $jobid # > $jobid.top_1.txt + echo 'End extract top1' + # Expand top 1 + echo 'Start sort' + sort -k4,4 $jobid.top_1.txt > $jobid.top_1.sort.txt && mv $jobid.top_1.sort.txt 
$jobid.top_1.txt + echo 'End sort' + echo 'Start calc samples' + python ../../PostProcess/calc_samples_faster.py ../../../dictionaries $jobid.top_1.txt #> $jobid.top_1.samples.txt + echo 'End calc samples' + # Summary by samples table + echo 'Start summary by samples' + python ../../PostProcess/summary_by_samples.py $jobid.top_1.samples.txt $jobid ${19} guides.txt + echo 'End summary by samples' + echo 'Start annotation samples' + python ../../PostProcess/annotation_samples.py $jobid.top_1.samples.txt $jobid.Annotation.targets.txt $jobid.Annotation.txt $jobid + echo 'End annotation samples' + #Rimettere i samples nel file di cluster (solo nel top1) + echo 'Start creating final file' + python ../../PostProcess/reassign_sample_to_cluster.py $jobid.targets.cluster.minmaxdisr.txt $jobid.top_1.samples.txt $jobid # > $jobid.final.txt + echo 'End creating final file' + + # #TODO sistemare fare script unico + + # python ../../PostProcess/tmp_top1_annotation.py $jobid ./ + # python ../../PostProcess/tmp_top1_annotation.py $jobid'_ref' ./ref/ + # if [ ${13} = 'True' ]; then + # echo 'Generate_report' > output.txt + # proc=$(($7 + 1)) + # while IFS= read -r line || [ -n "$line" ]; do + # if [ ${14} = 'True' ]; then #If -gecko + # if [ ${15} = 'True' ]; then #If genome_ref comparison + + # printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % python ../../PostProcess/radar_chart_new.py $line % $jobid'.tmp_res.txt' *.extended_profile.xls ref/$jobid'_ref.tmp_res.txt' /home/ubuntu/miniconda3/opt/crispritz/Python_Scripts/Plot/gecko/ + + # else + # printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % python ../../PostProcess/radar_chart_new.py $line % $jobid'.tmp_res.txt' *.extended_profile.xls /home/ubuntu/miniconda3/opt/crispritz/Python_Scripts/Plot/gecko/ + + # fi + # else + # if [ ${15} = 'True' ]; then #If genome_ref comparison + # printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % python ../../PostProcess/radar_chart_new.py $line % $jobid'.tmp_res.txt' *.extended_profile.xls 
ref/$jobid'_ref.tmp_res.txt' no + # else + # printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % python ../../PostProcess/radar_chart_new.py $line % $jobid'.tmp_res.txt' *.extended_profile.xls no + # fi + # fi + # echo $line >> output.txt + # done < guides.txt + + # fi + + + #TODO aggiungere terza/quarta voce nella pagina del load +fi + +echo 'Post Process done' +cd ../../ +echo 'PostProcess\tDone\t'$(date) >> $1'/'log.txt +if [ ${17} = 'True' ]; then + python send_mail.py $1 +fi +echo 'Job\tDone\t'$(date)>> $1'/'log.txt diff --git a/OldScripts/submit_job.test.sh b/OldScripts/submit_job.test.sh new file mode 100644 index 0000000..dd3dbf0 --- /dev/null +++ b/OldScripts/submit_job.test.sh @@ -0,0 +1,339 @@ +#!/bin/sh +#$1 is directory of result for submitted job id (Results/job_id) +#$2 is genome_selected directory Eg Genomes/hg19_ref or Genomes/hg19_ref+1000genomeproject +#$3 is genome_ref directory Eg Genomes/hg19_ref +#$4 is genome_idx directory Eg genome_library/NGG_2_hg19_ref+hg19_1000genomeproject or genome_library/NGG_2_hg19_ref +#$5 is pam file Eg Results/72C1MNXDWF/pam.txt +#$6 is guides file Eg Results/LNHM6F3REO/guides.txt (both upload file and custom inserted) +#$7 is mms +#$8 is dna +#$9 is rna +#$10 is search_index +#$11 is search +#$12 is annotation +#$13 is generate report +#$14 is gecko comparison +#$15 is genome_ref comparison +#$16 is genme_idx_ref (for genome_ref comparison if search was done with indices) Eg genome_library/NGG_2_hg19_ref +#$17 is send_email +#$18 is annotation file EG annotations/hg19_ref.annotations.bed +#$19 is genome type, can be 'ref', 'var', 'both' +#Note that if genome_selected is not enriched, the python exe will force $15 as false + +cd $1 +rm queue.txt +jobid=$(basename $1) +echo 'Job\tStart\t'$(date)> log.txt +used_genome_dir=$2 + +#Start search index #NOTE new version 2.1.2 of crispritz needed +echo 'Search-index\tStart\t'$(date) >> log.txt +echo 'Search_output '${19} > output.txt +if [ ${10} = 'True' ]; then + #echo 
'crispritz search-index' + crispritz.py search ../../$4 pam.txt guides.txt $jobid -mm $7 -bDNA $8 -bRNA ${9} -t #>> output.txt #TODO sistemare l'output redirection + + + if [ ${15} = 'True' ]; then + mkdir 'ref' + echo 'Search_output_ref '${19} >> output.txt + crispritz.py search ../../${16} pam.txt guides.txt $jobid'_ref' -mm $7 -bDNA $8 -bRNA ${9} -t #>> output.txt #TODO sistemare l'output redirection + mv ./$jobid'_ref'.*.txt 'ref' + mv ./$jobid'_ref'.*.xls 'ref' + fi +fi +echo 'Search-index\tDone\t'$(date) >> log.txt + +#Start search +echo 'Search\tStart\t'$(date) >> log.txt +if [ ${11} = 'True' ]; then + #echo 'crispritz search' + crispritz.py search ../../$used_genome_dir pam.txt guides.txt $jobid -mm $7 -var -t #>> output.txt #-scores $3 + + if [ ${15} = 'True' ]; then + mkdir 'ref' + echo 'Search_output_ref '${19} >> output.txt + crispritz.py search ../../$3 pam.txt guides.txt $jobid'_ref' -mm $7 -var -t #>> output.txt + mv ./$jobid'_ref'.*.txt 'ref' + mv ./$jobid'_ref'.*.xls 'ref' + fi +fi +echo 'Search\tDone\t'$(date) >> log.txt + +#Data processing + annotation + generating report +if [ ${19} = 'ref' ]; then + echo 'PostProcess\tStart\t'$(date) >> log.txt + echo 'PostProcess_output' > output.txt + echo 'Clustering... Step [1/3]' >> output.txt + echo 'Start cluster ref' + python ../../PostProcess/cluster.dict.py $jobid.targets.txt 'addGuide' 'True' 'False' guides.txt # > $jobid.targets.cluster.txt + echo 'End cluster ref' + + echo 'Start extraction top1 ref' + python ../../PostProcess/extract_top.py $jobid.targets.cluster.txt $jobid # > $jobid.top_1.txt + echo 'End extraction top1 ref' + + echo 'Start Scoring' + echo 'Calculating Scores... Step [2/3]' >> output.txt + python ../../PostProcess/scores_guide_table.py $jobid.top_1.txt ../../$used_genome_dir pam.txt guides.txt + echo 'End Scoring' + + echo 'Start summary by pos-guide' + echo 'Creating Summaries... 
Step [3/3]' >> output.txt + type_post='No' + python ../../PostProcess/summary_by_guide_position.py $jobid.targets.cluster.txt $7 $8 $9 guides.txt $jobid $type_post + echo 'End summary by pos-guide' + echo 'PostProcess\tDone\t'$(date) >> log.txt + + echo 'Annotation\tStart\t'$(date) >> log.txt + echo 'Annotate_output '${19} > output.txt + echo 'Start annotation ref' + if [ ${12} = 'True' ]; then + crispritz.py annotate-results $jobid.top_1.txt ../../${18} $jobid >> output.txt # > $jobid.Annotation.summary.txt , $jobid.Annotation.targets.txt + fi + echo 'End annotation ref' + echo 'Annotation\tDone\t'$(date) >> log.txt + + #Start generate report + echo 'Report\tStart\t'$(date) >> log.txt + if [ ${13} = 'True' ]; then + echo 'Generate_report' > output.txt + proc=$(($7 + 1)) + while IFS= read -r line || [ -n "$line" ]; do + if [ ${14} = 'True' ]; then #If -gecko + if [ ${15} = 'True' ]; then #If genome_ref comparison + + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -sumref ref/$jobid'_ref'.Annotation.summary.txt -gecko -ws + else + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -gecko -ws + fi + else + if [ ${15} = 'True' ]; then #If genome_ref comparison + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -sumref ref/$jobid'_ref'.Annotation.summary.txt -ws + else + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -ws + fi + fi + echo $line >> output.txt + done < guides.txt + + mkdir ../../assets/Img/$jobid + cp *.png ../../assets/Img/$jobid/ + echo 'Report\tDone\t'$(date) >> 
log.txt + fi + +elif [ ${19} = 'var' ]; then + echo 'PostProcess\tStart\t'$(date) >> log.txt + echo 'Start cluster var' + echo 'PostProcess_output' > output.txt + echo 'Clustering... Step [1/6]' >> output.txt + python ../../PostProcess/cluster.dict.py $jobid.targets.txt 'no' 'True' 'False' guides.txt # > $jobid.targets.cluster.txt + echo 'End cluster var' + + echo 'Start extraction top1 var' + python ../../PostProcess/extract_top.py $jobid.targets.cluster.txt $jobid # > $jobid.top_1.txt + echo 'End extraction top1 var' + + echo 'Start Scoring' + echo 'Calculating Scores... Step [2/6]' >> output.txt + python ../../PostProcess/scores_guide_table.py $jobid.top_1.txt ../../$used_genome_dir pam.txt guides.txt + echo 'End Scoring' + + type_post='No' + python ../../PostProcess/summary_by_guide_position.py $jobid.targets.cluster.txt $7 $8 $9 guides.txt $jobid $type_post + + echo 'Start pam analysis' + echo 'PAM Analysis... Step [3/6]' >> output.txt + python ../../PostProcess/pam_analysis.py $jobid.targets.cluster.txt pam.txt ${19} + echo 'End pam analysis' + + # Extract top 1 + echo 'Start extract top1' + echo 'Extracting Samples... 
(This operation has a long execution time, Please Wait) Step [4/6]' >> output.txt + python ../../PostProcess/extract_top.py $jobid.targets.cluster.minmaxdisr.txt $jobid # > $jobid.top_1.txt + echo 'End extract top1' + # Expand top 1 + echo 'Start sort' + sort -k4,4 $jobid.top_1.txt > $jobid.top_1.sort.txt && mv $jobid.top_1.sort.txt $jobid.top_1.txt + echo 'End sort' + echo 'Start calc samples' + python ../../PostProcess/calc_samples_faster.py ../../../dictionaries $jobid.top_1.txt #> $jobid.top_1.samples.txt $jobid.top_1.samples.all.txt + echo 'End calc samples' + + #Put right header into top_1.samples.all.txt + sed -i '1 i\#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tCluster Position\tDirection\tMismatches\tBulge_Size\tTotal\tMin_mismatches\tMax_mismatches\tPam_disr\tSamples\tReal Guide' $jobid.top_1.samples.all.txt + + # Summary by samples table + echo 'Start summary by samples' + echo 'Creating Summaries... Step [5/6]' >> output.txt + python ../../PostProcess/summary_by_samples.py $jobid.top_1.samples.txt $jobid ${19} guides.txt + echo 'End summary by samples' + + #Rimettere i samples nel file di cluster (solo nel top1) + echo 'Start creating final file' + echo 'Preparing Files... 
Step [6/6]' >> output.txt + python ../../PostProcess/reassign_sample_to_cluster.py $jobid.targets.cluster.minmaxdisr.txt $jobid.top_1.samples.txt $jobid # > $jobid.final.txt + echo 'End creating final file' + echo 'PostProcess\tDone\t'$(date) >> log.txt + + #Annotation of top1 with samples + echo 'Annotation\tStart\t'$(date) >> log.txt + echo 'Annotate_output '${19} > output.txt + echo 'Start Annotation' + crispritz.py annotate-results $jobid.top_1.samples.all.txt ../../${18} $jobid >> output.txt # > $jobid.Annotation.targets.txt $jobid.Annotation.summary.txt + # $jobid.sample_annotation.guide.sample.txt $jobid..sumref.Annotation.summary.txt + echo 'End Annotation' + echo 'Annotation\tDone\t'$(date) >> log.txt + + #Start generate report + echo 'Report\tStart\t'$(date) >> log.txt + if [ ${13} = 'True' ]; then + echo 'Generate_report' > output.txt + proc=$(($7 + 1)) + while IFS= read -r line || [ -n "$line" ]; do + if [ ${14} = 'True' ]; then #If -gecko + if [ ${15} = 'True' ]; then #If genome_ref comparison + + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -sumref ref/$jobid'_ref'.Annotation.summary.txt -gecko -ws + else + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -gecko -ws + fi + else + if [ ${15} = 'True' ]; then #If genome_ref comparison + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -sumref ref/$jobid'_ref'.Annotation.summary.txt -ws + else + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -ws + fi + fi + echo $line >> output.txt + done < guides.txt + mkdir 
../../assets/Img/$jobid + cp *.png ../../assets/Img/$jobid/ + echo 'Report\tDone\t'$(date) >> log.txt + fi + +else #Type search = both + echo 'Report\tStart\t'$(date) >> log.txt + #Estract common, semicommon and unique + echo 'Start creation semicommon, common, unique' + echo 'PostProcess_output' > output.txt + echo 'Processing Search Results... Step [1/7]' >> output.txt + ../../PostProcess/./extraction.sh ref/$jobid'_ref.targets.txt' $jobid.targets.txt $jobid + echo 'End creation semicommon, common, unique' + + #Cluster common file, extract top1 and insert into semicommon + echo 'Start cluster common' + echo 'Clustering... Step [2/7]' >> output.txt + python ../../PostProcess/cluster.dict.py $jobid.common_targets.txt 'no' 'False' 'False' guides.txt # > $jobid.common_targets.cluster.txt + #Second false to not save the colum Pos Cluster and Total -> no needed for cat into semicommon + echo 'End cluster common' + echo 'Start top1 extraction common' + python ../../PostProcess/extract_top.py $jobid.common_targets.cluster.txt $jobid.common_targets # > $jobid.common_targets.top_1.txt + cat $jobid.common_targets.top_1.txt >> $jobid.semi_common_targets.txt + echo 'End top1 extraction common' + + #Cluster semicommon e uniq + echo 'Start cluster semicommon' + python ../../PostProcess/cluster.dict.py $jobid.semi_common_targets.txt 'no' 'True' 'False' guides.txt + echo 'End cluster semicommon' + echo 'Start cluster unique' #NOTE doing cluster separately does not create the right order of cluster (first clusters of uniq, then clusters of semi_common) + python ../../PostProcess/cluster.dict.py $jobid.unique_targets.txt 'no' 'True' 'False' guides.txt + echo 'End cluster unique' + + #Pam analysis + echo 'Start pam analysis' + echo 'PAM Analysis... 
Step [3/7]' >> output.txt + python ../../PostProcess/pam_analysis.py $jobid.semi_common_targets.cluster.txt pam.txt ${19} # > $jobid.semi_common_targets.cluster.minmaxdisr.txt + echo 'End pam analysis' + echo 'Start pam creation' + python ../../PostProcess/pam_creation.py $jobid.unique_targets.cluster.txt pam.txt ../../$3 # > $jobid.unique_targets.cluster.pamcreation.txt + echo 'End pam creation' + cat $jobid.unique_targets.cluster.pamcreation.txt $jobid.semi_common_targets.cluster.minmaxdisr.txt > $jobid.total.txt + + #Cluster of jobid.total.txt and extraction of top 1 + echo 'Start cluster of total.txt' + echo 'Calculating Scores... Step [4/7]' >> output.txt + python ../../PostProcess/cluster.dict.py $jobid.total.txt 'no' 'True' 'True' 'total' guides.txt + echo 'End cluster of total.txt' + echo 'Start extract top1 total.txt' + python ../../PostProcess/extract_top.py $jobid.total.cluster.txt $jobid # > $jobid.top_1.txt + echo 'End extract top1 total.txt' + + #Scoring of top1 + echo 'Start Scoring' + python ../../PostProcess/scores_guide_table.py $jobid.top_1.txt ../../$used_genome_dir pam.txt guides.txt + echo 'End Scoring' + + #Summary guide, pos #NOTE the script automatically counts only for top subclusters + echo 'Start summary by guide and position' #NOTE change to top_1 if in sum by pos want to see cluster count of top1 + python ../../PostProcess/summary_by_guide_position.py $jobid.total.cluster.txt $7 $8 $9 guides.txt $jobid 'Uniq' + echo 'End summary by guide and position' + + #Top1 expansion + echo 'Start sort' + echo 'Extracting Samples... 
(This operation has a long execution time, Please Wait) Step [5/7]' >> output.txt + sort -k4,4 $jobid.top_1.txt > $jobid.top_1.sort.txt && mv $jobid.top_1.sort.txt $jobid.top_1.txt + echo 'End sort' + echo 'Start calc samples' + python ../../PostProcess/calc_samples_faster.py ../../../dictionaries $jobid.top_1.txt #> $jobid.top_1.samples.txt $jobid.top_1.samples.all.txt + echo 'End calc samples' + + #Put right header into top_1.samples.all.txt + sed -i '1 i\#Bulge_type\tcrRNA\tDNA\tChromosome\tPosition\tCluster Position\tDirection\tMismatches\tBulge_Size\tTotal\tMin_mismatches\tMax_mismatches\tPam_disr\tPAM_gen\tVar_uniq\tSamples\tReal Guide' $jobid.top_1.samples.all.txt + + #Summary samples + echo 'Start summary by samples' + echo 'Creating Summaries... Step [6/7]' >> output.txt + python ../../PostProcess/summary_by_samples.py $jobid.top_1.samples.txt $jobid ${19} guides.txt + echo 'End summary by samples' + + #Rimettere i samples nel file di cluster (solo nel top1) + echo 'Start creating final file' + python ../../PostProcess/reassign_sample_to_cluster.py $jobid.total.cluster.txt $jobid.top_1.samples.txt $jobid # > $jobid.final.txt + echo 'End creating final file' + echo 'Preparing Files... 
Step [7/7]' >> output.txt + echo 'PostProcess\tDone\t'$(date) >> log.txt + + #Annotation of top1 with samples + echo 'Annotation\tStart\t'$(date) >> log.txt + echo 'Annotate_output '${19} > output.txt + echo 'Start Annotation' + crispritz.py annotate-results $jobid.top_1.samples.all.txt ../../${18} $jobid >> output.txt # > $jobid.Annotation.targets.txt $jobid.Annotation.summary.txt + # $jobid.sample_annotation.guide.sample.txt $jobid..sumref.Annotation.summary.txt + echo 'End Annotation' + echo 'Annotation\tDone\t'$(date) >> log.txt + + #Generate report + echo 'Report\tStart\t'$(date) >> log.txt + if [ ${13} = 'True' ]; then + echo 'Generate_report' > output.txt + proc=$(($7 + 1)) + while IFS= read -r line || [ -n "$line" ]; do + if [ ${14} = 'True' ]; then #If -gecko + if [ ${15} = 'True' ]; then #If genome_ref comparison + + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -sumref $jobid.sumref.Annotation.summary.txt -gecko -ws + else + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -gecko -ws + fi + else + if [ ${15} = 'True' ]; then #If genome_ref comparison + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -sumref $jobid.sumref.Annotation.summary.txt -ws + else + printf %s\\n $(seq 0 $7) | xargs -n 1 -P $proc -I % crispritz.py generate-report $line -mm % -annotation $jobid'.Annotation.summary.txt' -extprofile *.extended_profile.xls -ws + fi + fi + echo $line >> output.txt + done < guides.txt + mkdir ../../assets/Img/$jobid + cp *.png ../../assets/Img/$jobid/ + fi + echo 'Report\tDone\t'$(date) >> log.txt + +fi + +cd ../../ +if [ ${17} = 'True' ]; then + python send_mail.py $1 +fi +echo 
'Job\tDone\t'$(date)>> $1'/'log.txt diff --git a/OldScripts/summary_by_guide_table.py b/OldScripts/summary_by_guide_table.py new file mode 100644 index 0000000..a6ff2af --- /dev/null +++ b/OldScripts/summary_by_guide_table.py @@ -0,0 +1,102 @@ +#OLD, VIEW summary_by_guide_position.py FOR NEW CHANGES + +#Script che crea la tabella Summary by Guide, dato in input un file targets.txt, conta il numero di targets trovato per ogni combinazione di +#mms-bulge. Inoltre, se è presente la colonna Var_uniq, calcola il numero di Var_uniq per quella categoria di mms-bulge + +#sys1 file risultati +# sys2 mms +# sys3 bulges DNA +# sys4 bulges RNA +# sys5 guida +# sys6 is jobid +# sys7 is type of post-process done ('No' -> no post process done, cannot count uniq_var | 'Uniq' -> post process done, can count uniq_var) +# inserire anche numero guide + +#BUG nel conteggio -> ricontrollando è stato risolto(?) +import sys +import numpy as np +import pandas as pd +# with open(sys.argv[1]) as targets: +# mms = int(sys.argv[2]) +# bulges_dna = int(sys.argv[3]) +# bulges_rna = int(sys.argv[4]) +# total_count = np.zeros((mms + 1, bulges_dna +1 + bulges_rna +1 + 1)) +# print (total_count) +# header = targets.readline() +# for line in targets: +# line = line.strip().split('\t') +# total_count[int(line[6])][int(line[7])] = total_count[int(line[6])][int(line[7])] + 1 + +# np.savetxt('summary_table.txt', total_count, delimiter='\t') +guide = sys.argv[5] +type_post = sys.argv[7] + +with open(sys.argv[1]) as targets: + mms = int(sys.argv[2]) + bulges_dna = int(sys.argv[3]) + bulges_rna = int(sys.argv[4]) + total_count_x = np.zeros((mms + 1, 1)) + total_count_dna = np.zeros((mms + 1,bulges_dna + 1)) + total_count_rna = np.zeros((mms + 1,bulges_rna + 1)) + header = targets.readline() + if type_post == 'Uniq': + total_count_x_uniq = np.zeros((mms + 1, 1)) + total_count_dna_uniq = np.zeros((mms + 1,bulges_dna + 1)) + total_count_rna_uniq = np.zeros((mms + 1,bulges_rna + 1)) + for line in targets: + + 
line = line.strip().split('\t') + if line[1].replace('-','') == guide: + if line[0] == 'X': + total_count_x[int(line[6])][int(line[7])] = total_count_x[int(line[6])][int(line[7])] + 1 + if line[13] == 'y': + total_count_x_uniq[int(line[6])][int(line[7])] = total_count_x_uniq[int(line[6])][int(line[7])] + 1 + elif line[0] == 'DNA': + total_count_dna[int(line[6])][int(line[7])] = total_count_dna[int(line[6])][int(line[7])] + 1 + if line[13] == 'y': + total_count_dna_uniq[int(line[6])][int(line[7])] = total_count_dna_uniq[int(line[6])][int(line[7])] + 1 + else: + total_count_rna[int(line[6])][int(line[7])] = total_count_rna[int(line[6])][int(line[7])] + 1 + if line[13] == 'y': + total_count_rna_uniq[int(line[6])][int(line[7])] = total_count_rna_uniq[int(line[6])][int(line[7])] + 1 + + + # np.savetxt('summary_table_x.txt', total_count_x, delimiter='\t') + # np.savetxt('summary_table_dna.txt', total_count_dna, delimiter='\t') + # np.savetxt('summary_table_rna.txt', total_count_rna, delimiter='\t') + tab_summary = pd.DataFrame(columns = ['Guide', 'Bulge Type', 'Bulge Size', 'Mismatches', 'Number of targets', 'Targets created by SNPs']) + for m in range(mms + 1): + for b_d in range(bulges_dna +1): + tab_summary =tab_summary.append({'Guide': guide, 'Bulge Type': 'DNA', 'Bulge Size': b_d, 'Mismatches': m, 'Number of targets': total_count_dna[m][b_d], 'Targets created by SNPs':total_count_dna_uniq[m][b_d] }, ignore_index = True) + + for b_r in range(bulges_rna +1): + tab_summary =tab_summary.append({'Guide': guide, 'Bulge Type': 'RNA', 'Bulge Size': b_r, 'Mismatches': m, 'Number of targets': total_count_rna[m][b_r], 'Targets created by SNPs': total_count_rna_uniq[m][b_r] }, ignore_index = True) + + tab_summary =tab_summary.append({'Guide': guide, 'Bulge Type': 'X', 'Bulge Size': 0, 'Mismatches': m, 'Number of targets': total_count_x[m][0], 'Targets created by SNPs':total_count_x_uniq[m][0] }, ignore_index = True) + else: + for line in targets: + + line = 
line.strip().split('\t') + if line[1].replace('-','') == guide: + if line[0] == 'X': + total_count_x[int(line[6])][int(line[7])] = total_count_x[int(line[6])][int(line[7])] + 1 + elif line[0] == 'DNA': + total_count_dna[int(line[6])][int(line[7])] = total_count_dna[int(line[6])][int(line[7])] + 1 + else: + total_count_rna[int(line[6])][int(line[7])] = total_count_rna[int(line[6])][int(line[7])] + 1 + + # np.savetxt('summary_table_x.txt', total_count_x, delimiter='\t') + # np.savetxt('summary_table_dna.txt', total_count_dna, delimiter='\t') + # np.savetxt('summary_table_rna.txt', total_count_rna, delimiter='\t') + tab_summary = pd.DataFrame(columns = ['Guide', 'Bulge Type', 'Bulge Size', 'Mismatches', 'Number of targets']) + for m in range(mms + 1): + for b_d in range(bulges_dna +1): + tab_summary =tab_summary.append({'Guide': guide, 'Bulge Type': 'DNA', 'Bulge Size': b_d, 'Mismatches': m, 'Number of targets': total_count_dna[m][b_d] }, ignore_index = True) + + for b_r in range(bulges_rna +1): + tab_summary =tab_summary.append({'Guide': guide, 'Bulge Type': 'RNA', 'Bulge Size': b_r, 'Mismatches': m, 'Number of targets': total_count_rna[m][b_r] }, ignore_index = True) + + tab_summary =tab_summary.append({'Guide': guide, 'Bulge Type': 'X', 'Bulge Size': 0, 'Mismatches': m, 'Number of targets': total_count_x[m][0] }, ignore_index = True) + +#print(tab_summary) +tab_summary.to_pickle(sys.argv[6] + '_summary_result_' + guide +'.txt') \ No newline at end of file diff --git a/OldScripts/summary_position.py b/OldScripts/summary_position.py new file mode 100644 index 0000000..4c88219 --- /dev/null +++ b/OldScripts/summary_position.py @@ -0,0 +1,103 @@ +#OLD, VIEW summary_by_guide_position.py FOR NEW CHANGES + + +#Script for the Summary by Position table. 
Given in input a targets.txt ordered by clusters, for each cluster (position) it saves: +# -chr +# -pos +# -best target sequence +# -min mms of cluster +# -min bulge of cluster +# -Total targets with 0 mm 0 bulges +# -Total targets with 1 mm 0 bulges +# ... +# -Total targets with n mm 0 bulges +# -Total targets with 0 mm 1 bulges +# -Total targets with 1 mm 1 bulges +# ... +# -Total targets with n mm 1 bulges +# ... +# -Total targets with n mm b bulges + +# argv1 is targets.txt ordered in clusters +# argv2 is job_id +# argv3 is guide +# argv4 is mms +# argv5 is bulge (max between DNA and RNA) +import sys + +job_id = sys.argv[2] +guide = sys.argv[3] +job_id = '0YT6LD1ECN' #TODO cancellare, è solo temporaneo per i test +guide = 'CTAACAGTTGCTTTTATCACNNN' #TODO cancellare, è solo temporaneo per i test +mms = int(sys.argv[4]) +bulge = int(sys.argv[5]) +#cluster_count_mms = [0 for i in range (mms + 1)] +#cluster_count_bulges = [0 for i in range (bulge)] #NOTE pos 0 is 1 bulge, pos 1 is 2bulges ... +#cluster_count_bulges = [[0 for i in range (mms + 1)] for i in range (bulge)] #NOTE pos 0 is 1 bulge, pos 1 is 2bulges ... +count_targets = [[0 for i in range (mms + 1)] for i in range (bulge + 1)] +with open(sys.argv[1]) as targets, open(job_id + '.summary_position.' 
+ guide + '.txt', 'w+') as result: + result.write('#Chromosome\tPosition\tBest Target\tMin Mismatch\tMin Bulge') + for b in range(bulge + 1): + for i in range(mms + 1): + result.write('\tTargets ' + str(i) + 'MM' + str(b) + 'B' ) + # for i in range(bulge): + # result.write('\tTargets ' + str(i + 1) + ' Bulge') + result.write('\n') + + for line in targets: + line = line.strip().split('\t') + if line[1].replace('-','') == guide: + break + # line = targets.readline() + # if '#' in line: + # line = targets.readline().strip().split('\t') + # else: + # line = line.strip().split('\t') + + current_cluster = line[-1] #NOTE with new version of clusterization, must use line['Cluster Position'] and not line['Position'] + result.write(line[3] + '\t' + line[-1] + '\t' + line[2] + '\t' + line[6] + '\t' + line[7]) + mms_current_line = int(line[6]) + bulge_current_line = int(line[7]) + #cluster_count_mms[mms_current_line] = cluster_count_mms[mms_current_line] + 1 + #cluster_count_bulges[bulge_current_line - 1] = cluster_count_bulges[bulge_current_line - 1] + 1 + count_targets[bulge_current_line][mms_current_line] = count_targets[bulge_current_line][mms_current_line] + 1 + for line in targets: + + line = line.strip().split('\t') + if line[1].replace('-','') != guide: + continue + + if current_cluster == line[-1]: + mms_current_line = int(line[6]) + bulge_current_line = int(line[7]) + #cluster_count_mms[mms_current_line] = cluster_count_mms[mms_current_line] + 1 + #cluster_count_bulges[bulge_current_line - 1] = cluster_count_bulges[bulge_current_line - 1] + 1 + count_targets[bulge_current_line][mms_current_line] = count_targets[bulge_current_line][mms_current_line] + 1 + else: + # for m in cluster_count_mms: + # result.write('\t' + str(m)) + # cluster_count_mms = [0 for i in range (mms + 1)] + # for b in cluster_count_bulges: + # result.write('\t' + str(b)) + # cluster_count_bulges = [0 for i in range (bulge)] + for t in count_targets: + for t_c in t: + result.write('\t' + str(t_c)) 
+ count_targets = [[0 for i in range (mms + 1)] for i in range (bulge + 1)] + result.write('\n') + current_cluster = line[-1] + result.write(line[3] + '\t' + line[-1] + '\t' + line[2] + '\t' + line[6] + '\t' + line[7]) + mms_current_line = int(line[6]) + bulge_current_line = int(line[7]) + #cluster_count_mms[mms_current_line] = cluster_count_mms[mms_current_line] + 1 + #cluster_count_bulges[bulge_current_line - 1] = cluster_count_bulges[bulge_current_line - 1] + 1 + count_targets[bulge_current_line][mms_current_line] = count_targets[bulge_current_line][mms_current_line] + 1 + #Write result for last cluster + # for m in cluster_count_mms: + # result.write('\t' + str(m)) + # for b in cluster_count_bulges: + # result.write('\t' + str(b)) + for t in count_targets: + for t_c in t: + result.write('\t' + str(t_c)) + result.write('\n') \ No newline at end of file diff --git a/OldScripts/tmp_top1_annotation.py b/OldScripts/tmp_top1_annotation.py new file mode 100644 index 0000000..37fd0ba --- /dev/null +++ b/OldScripts/tmp_top1_annotation.py @@ -0,0 +1,54 @@ +''' +cambia l'annotazione Annotation.txt in un fac simile a Annotation.summary.txt, ma +contiene i conti solo del top1 presi da jobid.sample_annotation.guida +Il report è poi generato con questo var (uso radar_chart_new) +''' +# argv 1 is job id +# argv 2 is dir with sample annotation +import os +import sys +from os import listdir #for getting directories +from os.path import isfile, isdir,join #for getting directories +job_id = sys.argv[1] +ann_samp_file = [f for f in listdir(sys.argv[2] + '/') if isfile(join(sys.argv[2] + '/', f))] + +summary_total = dict() +summary_per_guide = dict() +annotation_order = [] +for f in ann_samp_file: + if '.sample_annotation.' in f and '.population.' 
in f: + guide = f.split('.sample_annotation.')[-1] + guide = guide.split('.')[0] + # summary_total[guide] = dict() + summary_per_guide[guide] = dict() + with open(f) as ann_samp: + sum_tot = ann_samp.read().strip().split('-')[1].split('\n')[1:-1] + # summary_per_guide[guide] = sum_tot + for ann in sum_tot: + tmp_ann = ann.split('\t') + if tmp_ann[0] not in annotation_order: + annotation_order.append(tmp_ann[0]) + summary_per_guide[guide][tmp_ann[0]] = tmp_ann.copy() + # print(tmp_ann) + try: + summary_total[tmp_ann[0]] = [ int(summary_total[tmp_ann[0]][x]) + int(tmp_ann[x]) for x in range (1,11)] + except: + summary_total[tmp_ann[0]] = tmp_ann.copy() + +# print(summary_total) +# print(annotation_order) +with open(job_id + '.tmp_res.txt', 'w+') as result: + result.write('-Summary_Total\n') + # result.write('targets\t' + '\t'.join(str(x) for x in summary_total[tmp_ann['targets']]) + '\n') + for pos, a in enumerate(annotation_order): + # print(a) + # print(summary_total[tmp_ann[a]]) + # print('tmp a',tmp_ann) + # print('tmp a in pos',tmp_ann[pos]) + # print('sum tot a', [str(x) for x in summary_total[a][1:]]) + result.write(a + '\t' + '\t'.join([str(x) for x in summary_total[a]]) + '\n') + for g in summary_per_guide: + result.write('-Summary_' + g + '\n') + for pos,a in enumerate(annotation_order): + result.write(a + '\t' + '\t'.join(summary_per_guide[g][a][1:]) + '\n') + diff --git a/PostProcess/20130606_sample_info.xlsx b/PostProcess/20130606_sample_info.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..ce537cf4fd7035a371ceaf4beca30435dee7abf8 GIT binary patch literal 990287 zcmeFWRa;%b)+UO(1p)+j3GObz3GVLh?h@Q31b26Lg1bwAg}b|3uq0=a?A_< z7mJ(u)T}w`wJ}OT8WQR~7z`L37#J7{7*uNFQ7AYV*c~hw7zP*|gtmyior|fRi@u7d zgQ>GFgNLmRaXu6TbuJhL@cjS#`@gsX6G@wLeN69%E@gefidxKQbMu*z7+#&r$af&; z?rkbuY(ZGTEk7&am`!5QKCextH}P-&N>~t9o z5J$O-c(XFJsAI!j3&guKkb5>HzIWgIj*;P$6OO(#)HF-Mvx-MXdqNf6)7Vxv5bH{B zD_!%qJ!a7LjuBr-8Lkapq7cDE^1~|5SA{?qdiCx$28ytmE(kUtEFVkF%G(}ZYW+xf 
zSipsI&5lC)K81yut+f*kJ){f2MVl7PT$ZY!eSr0+c6o;BP990=CwB1Hy$>$IyY*!F z>IB>0FW`o2$?!S<3~XP^LgRM%b=M0W^gz3Qq3_N5z8Z5xau~0bauEnxPJ>k^-NCzf+f8n%=*5z_vun51D?9{}E;KagMw z{~LRKCseYL0rsu~`+qT2|Hr3S#>>j}Gogk2lw9jg_uXwI z4pzvrLsN2x3xSk5w=_ckGvY?l$sZ~vQi!e^Sj4}3csZGIZ^p#$U4qus0Ff*PA6@Ap zwJP3Y{j2diEEa}VNM{52mgGB*`TP0j5HFc0MVqFtbQMd~xw4~z4uq!CFvi<_%R0;L z22A_L7T+b74MluZlim4V=lremN1LTSEtTUL+|L~5{K=n0rcgwwGS~A1a$OH)@4=h?cxE(+BxB0sfQ`I9a*hw(OHiWmRX+dYEmuz7 zArXTM?_nEv^87wscTL|1bt_aebH$%zBOgnZt2}P!d3K<@TV)<*XB)^G0jUowFM76?51fA?QUqO5g46H@49&@b5CmmP9j zknP;DDU#QE8<7F}eh{TwZO7Nwmx#)7CF%`|J)^i_eg2V~_Eo)%t8EVgcouAZDW|)l#kf4BK zDMOnI{E+7NSz~hLQ18MFNzYV*O0d(<;-u|RpCTKKHx&>6G|4K?;qaxMbwZz4H=^YX zW!FMDkeA!Q%iS=C7?oB+kRI&tOn4clHldm|Fii}>CeR@zJQ_43_S^U}-31(I8%Pi# zN?vtzh){>J-Z1k`jQUsGk3fru=PWG>kgBKI!)Sa&e5z>JQ&f(&T#;s_ny?`9yZxYS zE^*x@)4?S3%8TP@xOL14AmsmqS{n1uTr~iMpP|6OP=O}^sQpinMXSi#uQDNZqAvNN zxyb)IcN5Aal}=~>q-bKhn2nhh5i$}_H5wCT_{X<#7PV#7kXy8P^rxi2x9m*!Ci1Ta zmDA<9GoK~lsjH>)+@OCsw%}(e8WAW{0NyH;(=&T}d?=p~#PJfd;2j7xhSlg)jRPC7 zk2yDyeFXE~t9fN^BG1w-z!s@?Rt>B=SqMMWQ-L+_=$oO9Ut9!dNLn0Ay9?qxfAfi$^<(GEI))& zBP~X@3~kB$n7QR+oo=N)Eym#cY%MC}mu2SY^o>4VF_S)FhOQk>LNR-wfKPWwljrL$ zTa;fnoyz*JCdH;LaqUi5DxJk%$213cPcQ=5?gaDUtW3ERJ3iJ9E&36qdV<)xZWW|~ zz-K>w&8r*C)Zc(k{htuDVXDL#Muh?sJHY_^@c%>5*}~M+#hLN%Keji)tMpy1UlWht zdr3bRoPHD->?4Saz{Mz6z*b4Zk!Edpkx`VO*Gk~ix@nwSsnJIz{uX`}?d(O^OpZ}@ z_oir=f&^O;h}uZZ;!|4^WEK)mYRmihx>{!Mj}GLr^kJJd;LF)euTfPe1Dy+ zf?gQAJO11~?r)!;b`y4W{{Bhy`|!`>?;pc8et#ZT`};w(dsq9btBc+I{J(y$XK-!` zygpn#{c4%qy+qabe|>(uzkQ*4ye0q3U4ExzFgYEnKj&jrPlrv}3@AbxOYP-GW zPZl4c->>`Ku&`gdVSYYe9&gypZ|=82FH~2+ONKYQ>05n&Uu}+F+`a&>9lreYuzh`d zIkI#3HAg_;7rW{6^ZX&|X4apF{p-`09Kr3bKWRyqf^?qpe-cdoJne?neBC7U^Iais z=VYC^%Wmgf$J@K>|Jm&`KoGvF@9X<`e|}2TaQVBSw0ZE9Q_x4S^~lfH=kYe<`GxlS z@yFM%e%M>DCO0oHjHorgpNP7Hey%P)ovQm)`T2Ui{(8PT1zl`|N?K~%2T#>6 z1^Ikmcee?uJzkyf0ROMM$HQZxwSYMUo~X6WUpI@t zw^w23SRFYKo~|C@xp%s21pGv;1J)0J{>l9P%4(uvw?ev!rHOL33es0{c4!b3^zr5i z7tZ~&O|-f76!K}rbxQt724_tF_n*CRh(rAA6pCTnF~@RO`-9aC{8qm|cfZ3fy9Itf 
zAyg@4#zy*cWc#MDm%0*>yHbtX>!;M{Zx3~m+97|JiqAzXN6?=3WY)SN;NI?oS5LZvX~DfzFE{t8Hwd7MBIY~`(Zo0(= zolf#KLv%28e}V$G?8Py=c!<;`&PVmSfimGs_U8IDTHe}G+F7pp`rhu<@(rcYFe5v@ zjOw)$B2%}_$?%oW*z$8zxCLwCvQ-r;w%X>@Ya6Sk9O;PSEsCbP39i=?gSxtboSgRp zhkVF*;#@9N*$!>NKzKHHpQ7 z*OgJ}8i~71;ga(d6e~BWW|>18{mc=*KI;bu>|M<`E|!UikJ6 zq7$5cJFC|T@q%5_od+!H#Kcu`Pj$gS0LPiB(mIDdUVUK>{vIZJd1^PicY)<_<5SQC zu7(NnkeH^b>2e%Hm{+;-c`@Y{7v$b%0s~W-5-vGMff>lCuYR(=K40h@c(-c#d1U3$ zfW)VTH4Ob38D2Tgak?NKF_W@du^e?oI?*ZQ%8n);)=!y z<~zB<1AA>G*93XWex6l#wUFA>o^#s3iQ}v1ByKf9V?rQ@v}mS}bTL9;J-#TH%~$v| z!bqh|&CG)r_aO#CT%15Wzur1hw8#L{w4B-0E?-J}><;0!tiAb5`_t&pIM0J5bjuodIt6iZBar!_hybc+y|bnBA)mLl$@(JR%2Tq(DGeaLMTC$ zK}xX&{@>6Kb~Kx8od{PMdubNtiY;kWl_P#&9@xpcCR~y@mlTiHk71x5Tgw(*H2+|e znsCD^l>P}rA^&$nrW?se7LoKZpI}ob*^?GghW*is5%a(@WEA zp;uyEx7q3~oZpJH$lo4NZHgSkdencp9RMNrXR6bri64st>#8+yQhQpVIbhaUnA~e0 znv!mG7}#*GLBx*ql+98pTd;^8Wsry$7|yBcs=9t4DZT7~st{{$-Y~05cYF7z;>S%g z`Ko}@LQb}l*W?&&u-?vC-8)JtOD3xBv&o{p6rr9{l?A)o>Sf`NP7~l}`4X+ir3eI& z!ijRl3dJrQ)QY&!Iv-6{SJ^+5a#MQ2n?|wR7cjA(f7ybR1x9UKC`1c@vl&kb|*>I&a)rhGTKJyK!S|mH1jejwCs?hL`xt0{7upAd_(vn0vCL7{R$x=JFr@w%5Id~m$R%sCU% zbeMRzbo4#`9)<#e7t#rH6ECsoJ(E|&d0v2p)MB;k!z9Iv$=&=ccsg!vT zdXZv}WZ5{+fbxuI{O=Tlrtn3Bg|F$~e@pVEdI#ym+i2<9<-<$S5Xe#=EqJ%mAbj4M z3REEj;N6Xcf#+Gzw+^SactkW_O)7-Kgb`!}nmO`Df>Y#46E17{52cIHQmmx-K7KBi zOfhJdoR@OId{? 
zh2Xh8B9C)F2|>N+6zpYZL6^~7B1>K1Kt*4Swy0_wvuoXoG(%48ZPc-7TV1|VnYtsn ziC&pdU$MN}D2zG2XF{LAjczs7W2aXwlV;L}5+&E);kuS+dPHzmmj+(J;@+4D7hF<= zg6^=^@Pl2ddPS3IMoC|)8h3AYUR8I7xY{2Wy1nU;UWwr%zI+i{vRj5oE7i^o;_&>iByLAYr34^kgFes%8^5(XV{lX5%q2JbSlAP~F z(G`DHELP7sp#R*gl;$6v?spUi%X;b@qeQTw8K<&=muP2*=1|pdJ8xK}by%k2yq3G- zu<0ubBj?HwtSYDYrd0Gr#k5R_q9&16KPXb^wMh6PDFUjP)N|H=MIk<~^;%2wuqaF; zH2+{vhcoFBC60;bgc)_mEERXPS;gv%?gT+jh=y7vC}s)31XE|x>E{D#lEjYiU5RF_ ztu(5`a+C}x1T>9yu>C?SU(*vHCy_3Hn1$A1Rg%;_?mQwjO+`Z9Sk$2JQ2YzQAdy26#an#TU(Xco{7oF^p| z?q4!~$Jj!G8)QH`Ay!dp(JD0?-zb(C{c$ro!nFmP(?J5rltST=e<;ijM zsA`Z7u5O5gx@RnN@vgxpxEZMPFeBaTZWm(t%lRE?NrO-Xh}vK;alQ(eCO)c~I?>X;Vx+$QtnOvwjAyYUM_5gvPTHq2j}VlG6lJoo4Y1PPVk6an;1n zs#8=z!|6FXQ2w*+|JJ)nIGE@cxe+_x2y}&5dGXK?+x+wq3?Sh}PJg|!Xf$6C#fjxK zZ6QJsiwB{v#`F$w%5yr&Hh`SjxKPpAV^Pf&Qk5nNrC0zrGmtNh1^-hRV)K86F+_EU zifOT3nKG*o<5T>uCC>Gl*4VHdXR(3)(3~}ju$eF>N)zs z+?M*O_x)?O=9xCDX>`B%%Vgll(kR}nh88JRvPkh}oH#|in^-9BGH8$`JcBrQdqI$!Z(<))u^pFoa4cPyiLcVCI+rgRw_~5D%!G=^RKH9Ksnq zAcl3zEtgPA^;i7qBI+jR=gjhcmRWWa?;SpJuU4~;+Y*{^(iPwXO-v7lVrI0<^M`KI z@iAIO5$=_@9-qHPF@5<87)*t!P+LN$&@mRUWv!*8P!BrcL!DP3>Ik=N4?yKujMJf&gr)K&=|5&%z3Qi*I*chDVSPx)<(iURf~YMBzN~qj1yN2eh`W z|7G}}#NeHpv^&0tD7OQ~w&ELffv{`wN^$;$gFKsa&!`;|_%a3lsyp<~d^})rr2t;Y zdgBL}A|F`rB;zX%)%M9q6I93vvnr32Ddzt@Nx<9ixQH*EV_N_22qDoBE2FVtS5m4% zHf8DVTeD)J*!(M9#~~&-=Mg2?ZLj5y`c?ppfRJVNU&Re|Na$Dyd(s$yNZ{{9YX>3M4#aE3{xOydl!%>@ujZn?Sl^hg`NY6+d8 zmo75IQSwqsdSGYX%fd3#W-xxfgbHhR5j;BdsB6xGG#O@@+1vRew;37}4@?#+VYs0; z)8Y=~1Z7=4a`&s34)N(&!p9_TAQ)zfgOgDW#5AuAB#~nx6z-CVu`ei63Fxs1A(V-JlaA&%>t5YGGz1*9$VBS(|cwEBL|a zh>J`mdUEB(f+eVO0qB*Mp}D%#(%4_nSAD9NiiH@Bk(a$1R;tl?Dy;>C{L{127rqY8 z6iNvA3@%j$Q~HpWKOWXo;GW^1_Cwue{)PN$7rQEWj3Pybn8eza$~f$cM}iTc%+I-_ zc~*l&uIvBRED(;kY4TNtM?9go@E4mUz|sIIQ0)hdNEt}_%1d4v+m?m+h=!|U;AW(H z?c1_}|6$gA_0)^sAKujGFI%`DMi+Vx(_)Q9eA{Tr_F~0!h`hcv8vhTe`SLPim?O>rHtvFI8g8 zPd3)DNdbcA@&|a~IPfLo``e5tW?~}6QXc-xd;RB>(zwhu&6Isq%2q20r>i7#1>195 zVi#}cK3`w@&)5lLBh z?AO2g7e^kqlK`PKc%9oLGvcRR0tW}i*GR)4IJc<*q8QGEwdx*JEU8(B%qAt 
zoSo@KKP2&bpCmr&&Wjk|4_Lt_G6W4bv-i>tvf&q8U;kw~b96l9zg>nh_x(Cr(TqZ0 z#(~FU>mcuSv-EeU3Tmq6$KA%2#{!uefl2Lno1%F3M5-h`kLxFF-y+5Q&ISY0q%I)# z#z`lsEG0*kpm2vR$~8l!+h-z6QA8I*4P4X)K}hG_V?{fkm}5D{cezQkJwCwrG8~#O z;5X^1UKPi0OaUQegq^S@h$YD zPl=9@c+Ok`_@fL=`ONc0eFp(APj=7l3xK}emC0X)$~N~NW}fO9`vmKn_6no`r(Bo3 zgOs|0{40hJW^k#D;+MhAc4BDw6&U}@u57aVe;NvnN;Z$+)LWD-0DhuSt`W0CM4sC~ zqay#-sK_7UH&yCoX3NhS?;Z@w*ed`TI&ke4=U;>lNqW!Yb#}4Gy*#@2Et(sq2whw` z(8jX&7}2AFIhMQGO0Xfj{5~?`C)ERtY-R!DzE>k>3i>_JpW!c?DDMo^xlnOW9F$K& z4*l<5kc6%$AgVlm73=15IQLOMa9OG8U7?M169Hq{%I{s-JtON5K>XD7#1^7BP_%zG ziH|b#BBGB2-W&@Hu0Fh#@zIIRDXUV!kg`Rk(0lan5Mo+|)|XS%vN5stGyadjTKUYC zLyA_k(Q5g4Qvi043KtbP?6KJY>b8?rT_`G) zv~%9HPkmRjO6&V9(hG1{e1E&jfrOAes%*WsroDnLVB)fhlL!gUmWfXI z$V44V`o`(OJSN9FK=<@Ml=F%6cWJr4Z#t(S0IZyq<++UCh04*XldOo=pY;zn2D)0P zqb#RIWrE^wVZf78Jt)<-mm)%?xO!~tJV!MVzzz9j^FEKW{+lFiTYQ9yL9`Jf77AJ> zEv>An{Y>&(Lm}0hz&ze&(2G)S=12qD-tZW?oTEdI-Pn9ug)WQL(=1N9z}z9zHdMqv z(}QPXPpy5jv6s?|Jfa%5U6eiEkBhmWWi>}KX-H(mC z^ZCtoRWH)O#0nta6RdmLPpfeI+LmVm_zN<@Pa3~6iw}OdVjJ)}0BdMXyY4vtfJW-W z5}aTM`e$8MESW3On23JCz{oCN&EudL7pKM+XmGUCGF}->y4*6{vyAS2C{1RL;-G7+ zy@HCpPOO#r*`=oN$ajxA@W-8yUOWafhCiiF(lbBliG4!ncg>p%$(me8E0|#to>p+1 z-N6?tHw4<@`vENlKWP8Rj=x);mx6$r@+q1zM7xLqR@FMTR<2@`%ElqzEluj!o!+8f zzw2e55T&I%n`;EiqCyTCHb|i^`B+G=SnyVkx=!$Q9*)$R*?nN!PHYf5G?j+9Z=;k#=(qZEH_}OhdD7}n9_fGRXog_IINa^JjH57-)7+K%BQK{0|G!?g) zK3!<02af(~5wz&o-Z1<)p+tC<{20sn9(5vj_P zq{wO0?ILAo zhhX;|!~Wz|D%eu82R0X>A7Vn^sU3M{><%6^`ej8(s9t}Bn*J(Cb8`afpcqx7g{Sk- zItGLPsqI^!Xa~*vw@{<3KwI3@w`eflQD)jVMbC7BqYy^a!#wwc;9HjRv3w|T>$T0N z9Zsz;H;!fWp{FXXPaOG99($o>)~|}+{)HY`6ghjEZ>aLp1Q}am>YEm9^GXF}$+#5y z$nn?ZMIEfuTVggB?ZB2}GOLcjKd)plBJ0$$eqtQv@Tn0~ue)V~ zx5wr}f_vo)l4O1f1KW&p(0~P7^fdR*FH!DMju~glu9J^=REtG6EJ|&cy_@I{#YDC_ z@Pm?GnH4T7D!>$zHi%LSCy@{6P@4M41fHrny22-n2`3Jkhf6dGAp5k^gf(d)(F+uN zMkG-ZO3~Eh3lY>5X)=8xhL^4-D1d?8-6g<#-4P#KF978GiM7HuUM!I^${R%57(Wc2 zI|G#YCkNSz&nKJqIst#+?+}e{+t>mTsepi4)x8WMH8lB*vNGvqw!RUYmbfZ_ZVszimvP?zrRZpi`M=#$CqxtlFMSsV1}BL%BDu#QWPyek%@T6Wwz4}! 
z*%~}w0(w_?D(u-aBmXI(HK2s9$Q$`X%bbt#@4q@TWvt?o0iZKF7RIP!lLU^?Vs+Gf z$WZ~d={GPjG1?CC?JO_pJp>^4b|=vaS}<|E94+a8|0n}LUb3ws5L0|(nV8Am2&Vqz zP!6BHvt@(N0vulQSHV|TBuUa72DWeVK`L_=INRPGssS8{W1JZJ`~8;ER-PiQdjG_g zi|b~zZ6iS*R_baUD)~nb)>;!xVJOhS?(@QZnD56H+NZI5(c|Ul2FLEl;QtV5iIiI zGvss&_F=*U0dzr6{1IVL}6rb(*ekq8M4b5SnuTvpv1(L><_y9sD|vU>2o?Y?kTqng4gG zaaZ4(GVSMPtIwi+mgZTp)czTrPyZI@f4eLK`cp5;;U`QFD*cTwwG_Y=SIbJV{u~mV z>T<%z|AtetBCL``($^S@W>Xh|u2GYh@a1a_+zUIKW z(n%Ws*vJJgU4R)ql7!sOzQR*{3jH=1YwNsM|8O_ZOJZlr@f#}*ONOM0!XN&{KfWv) zxBBp(dmwNKpVT#)xpC9e59YmD&y(P``UqRMtM8R7G+G;10st^Rc7mNjv%XlV-%6#C{m&i$`QCDQILqrKfIBsP7hxq7EE zM<;!ZKB4w>rD)lNMJ5ca3=IEmWrdNlH7G@U!a(;~0rZ!^J&V!XC>IAxXHfV@{O&!K z&Bhz-INYPrxt0F1engo{gd|CB6K@~5*-91MS>`}#JIn_9a;)PkB(N9EGg%#9r?-IV zow(QqXB@{t#fItaCs5hPpMxLIg>7#2kiQXDeCaiv(hW z5k{L-=&wzWd9ZrL0^VgEk6yoKCQOzUVVcTTG&Ny1)(yDU|)ho<>!XD}M;U5y4@8L_qSqP*8~at9NlAP(O_*W>2`AH=JK&||DEs@!X=JrHg^ zd(}b19jXx(flPAIrLXt|dXT;As0>oqJkz@%dv@jy`xxZ+k7uBhmE!}(D9tm|tC2P} zXO^)Cng@`tMs*8YR$;%utT@OUV_TD;=XLIDQGt!V<_B(nqD5JAbC7t|AR?K6unc2^&4rUw&om zZZgjZ4vYKUj%mpQ|1>d%Deot%FSaiv8KjLZBpFj5MO+v z6OSd;DPzkYHtFt6v(V|Otq3<5Z%OpSCT zg~h$VPiO>1@El!TH$=|o#ND5^I7c$(4W(pVbp8^r*Anm8j*gr{Vl(A<#?WWyXDY#; zeB_QS9L5Q46@UUAl^Wilxj^B%4!po~9f%Ot#}lkIc-3OBKwaBhxionQ{Ctvo`$u*IVSE90vTiGs7|oIVB=yE(P0GWNtNQ)!vu4m3ME zBfexPj(5cE7ATZ!mENk^2?Z8rU|+%{uUHz(s_j_8d1_Nxc_G7Ybq-Pd&Uqn80knk` zl8(F20mjZQqZgF^sURJ?*qwcWhf8n+3xd5LbZgU(%1&JJVnVQ9&X5)%B!`T_JW>|n zdUJLtTb7>Wpm1|kIdRWua@OP!3P_7|F&0x3dY-^zQ zwy-z6nP$qXaMAV&g1`SDO|Cs6be6pRO>2$W=pj9UJ}n}PUS?njK4w56i&?t&^ylc$ zJgoLo-KZKmhDkI(N_xlEchV1(Ph6+5FY(Vb^Ux5p+3JE#i!y?K*a-FLq%XH)xAB0eADq?DwUBQc` z4B9DTROmX-8JE8m%892*Gy>5h%c8g+-owj&Fu(<-g}M+#t?mI3!s%ThWSf+F6WsYo ziKK;rm^707aszUA9?bb8{A+#|P^3HxtJH8IURfmXO^0A40h0a3fn~dy>fY7vXKnn? 
zj3)^pZax@=Spx+5ZY*EqIG@O}`7_KUT%lKR_-5n^&PXlZUeXBJW)TVoNA1t&f=`)TL82`72-iwUWh}nNMx|TroW`0O4{NgPhZEMI;vgv zb{xMmFE7CR=^vX8(-0hsOPM^a*f4PDI>S7sCUy#c#FbISV^mb~kf)U3AIqSAG;&;6 z5KN#$;Mxc=JgoI)dIo@0EKE^UUP0)n$n?9T@?9+0%&_-$s00kO_-1uNwCob+h( zZqy}+ZsG#S7>v~Bo}(?YS7sdU>i{*h3;SV-udIG(fh@=aUGO8*yo4kVi#JnVJ3XVJ zFm>2nM@T&bk=T*#2$0#?$LoCic>@iF98{Y9AhnFMgD24n^G{zs- zC`B~Wvk1eb)KdUm^@p02E}sfqj(pID;u|_f#4N&{i6Kj(mDWRD?4lu&6|v9QM1jwcEb-jd zR`Li!n{@<(M|OPskn}p0JSBl7Fyt8u#AB|k!UW7NS^}^puUt!vLRJsbETPc`3s+o9 zY$2XpMIKFOLw9zL!fynH83kC6^%3Qu(sdI!lJzT?;5jyY5_Xw1Vaa}U?*Ig(r6bxZ zTH-yM7KDh_3a>6EV<6VOwN~i2-7sn+4S@Ugm@fC z8gmb1h!8AY$*>cyIKDTTnYtpa5#a@n+@i$OkP}Jna@g?H1m@VRL97iWO!O!wb@Xih zFmmB|15)cSvwQJ91=@CbSwB8b&Iu$t`vsuY60*n%yrCa2ODXj>lL}Jn?^Z!Zoby{o z{kqV5NE$X2Dp97}7aJ`oh+ATv9=}uw95!*?FSPsDW@n%+Q*VZRA!B~(?rD768IL&% zp@W^RUl^E%CdrI0N(Dhrx=HhkW1BSdlY_$d#pyKX zc2oLETItqxuU$;QepJ|Yp~>&Ydf>XzRD~*E`-@*QiA^C6G&^M0?}J?*c!0~56^N0Z zRN_hbyR~h(JlGlRK%cWQc2s>0VTLFYMbw?PX_j2vagffO$MZA!3LiL*Ioo=C6nOC3 z$sn10QJv1i;&13AYoF=cb_Vr-)X{)ehU$kd`m5o zKx%=r)Yu7vSgJXbJ2h)BHEE~6gMU^z%`KG1l4?7?#3DSMAh301aFTr0F2po1JAWd?^r&o~A z>LlM*AArq4Fu?cj;kvUeGQD7>Tc`RUP6kw`8_V~(46zHk`X5Ncg`%>C9oRiWK-Rv! 
z#`z7Gb{-^c6q_lo!b`mjk+GcL_9w6RB{1LTx0{f(WuSzZ6IL84Xi#W|L>(G2K~y5# zPAw9GTtZ3^@KX6(ThtS%!r*l=tKSLJ>>bIUXrdT#4jaU0^AXSa%Qfp1yEZe;;L_9M z>p;8K#rUg5rVP@ZL=nKocpXSD>kV$`=DfVIw~A4w=(Eh{!Wt;V4lJD`4bdTqkDq{& zJzJ#`!27bxxFh)nR6p-jiDfQRI&$zjecI%93Q+Ahc|RjZ-M?#;I#y-LBt24PIQHJj zI+?2+3GcfW6)xdK{@No1m_R6=Un57r+K*u4*E@GaWO;4P^Y#@81=|$LfN8be*k)X_ z8X;}ugvDHXg(CU~Yl9qfuP^>fleRzgeXfj{Wt!&MQYjkend*X@Ln*Y(8CGdP;5LFw}i8y@JsL--60e@6!&Pu@~0N0H) zSc=sw9oWO54mSbVGnC9)arrKD_~XhAan=yQG1~;-Hn_zdBm@c+1j3UVj^so*qMUpZ zmSf2GrV6pUoQ4-RW2RaP7tmS?a-fqvU$@|1P?c&g9ZlHB_Ya|iM2;a&4C>Uw$Ay^# ztUCK(rExv*c1mIV#7Ws_t_xchL!Ua*iuryAoHGJRGuT5}Ac&1xySG zlE+AlPcD3Y?=)tXv|`jaSH;e+$ffNWB=864DHhUhjh$5a=ZnvShjA{OER&m%jIXzSdp zJvYGNXoIOQF`y#**Xd8xDUPvxkfj>fN6NEhjq?YexSCGRM79G1o{=JiiulX3@R1lR3U*&-%6 zTa$jC3|g8*#Q%ctsY7f9?oi2)YSCMVOQGdZSsY?Kz+=XXhnx zn<4x}_NI=N30)4Ws@efe<-T~;-vQS>807F=xS%8JDZ)*H$r&Ys3VTJ8BP`+Wib{Yyt|E!7T#TmF042x`!~66I6`VP#Mp8GX){koH?HfuPyys0!V#b9%i$JvFJ>WT0Gsbyrn zX8>^)N6#~&!E}LD>KY7pfH-%QQdf~b8n=CKK&GK-cF|@)c~TOJzm;cjkV zS~XdF;1M)$&7{4T_)R~DF#!tN(Eer0(iVoBJ1ae(&PewhtW+S$ENfbBH`Wc;?0>bM zrC9^ok+wn5w6fZ{y3u@S!No@mDs7nkbxZgTv{ApG04!0oVjoRtFSbUyW!M-V5UMH31e80Wz4s!uJ z_vPx0;nMOye^OaR;2MqIjV1h=9qgkXKtc;Zv-k)Nn{-ajd~wuavgqGYdg#)hmLTXa|K1 za-!#QRz;Lst{1qnX$`wUIbXl1YHDS`K~T)ot%_7c7?X7PA*@nsTwrklwp)8{S^YBf zq|M@fFCukvc}77{!~Uwn4T^a2>V$?v@=#!d46He=UvI0&ad%>(ciZ>|ofzCI>lo2b zdzpj?0SvkN#l4o-MYV*>D4Jm7G%i%A&5#qao2uZFusNDE<1aseS9~#9K6_3(&(}6l zE?DYh;QdpRZ3}jgN%Ne3o^tyq_I2OFQHs-rnMi?sDoT#579`z8c%;HOU3R7l#gkMO z`6-O%NCUxvudZuBTr=0Y0g-L0tU|X4CrfYX93pGX4Ercv*N1BOVSL`gaguY1dD6?b zS)+~Pp<;8>4P%JG=p{aD3LlMB_DqFwsqD-ZiZ`jsW+@MWcSxVEivMK}?`$O);tc83 zJf(S6k0{}|&kp-6dkMPT1Umtot}nkkXH|E+rLKKipvBBQR=oW-;v2hE1?Z0pZ*%Ou zy%b9yv$6uZ+%0N-%(auNR>=vpVNxe0N#%{q2UjwmU1hZ{VQx=Um~FMN6zo3+IDN9| z@TVc`3~&`TZbDqk_hV}ZAN9_D1Z#H*rPAX)nb^Hf5RmBLN}t}j7-Kg;Q1E8S7(fX$ z$;~6W(RdT|%9&P>X;GaFF*LIoncWKb6-#wJDBM}fFFoCf*k^eewg8W1=+-K?FWvLG z8D~ctir|}$v_1hfAg;H-B5>F zn$HX|V`g32mxEty@;Qk|lgg70w7pgJn(!AU6z$konA{C1te465yS_%SH|Ly!eA33- 
zy2ZDJHv8B@2NVbBQr~%$dq+JmbmkGi(|9+5tD9DEY~|ZIaOL4xPYrl=$yWqMai%cszK zG}@g#h_1VXQLJV>yU{<-4z8?}iGB7=36X1@+@SQ{PE-##(JQ5Pt`K6mIO>hxRm(6n zh`TB{#pzYKw6@T?X;TdiX?6U(e~Jo`9@{wkF#^GwhpxoKNS=|nM)kgJ-so$Kbupg6 zJP!HiTtg2JaT_q8|By>=7+VS|iN}1zfyQ#C-fY0J+y6?ZohLXch0?+;xI{DEU!>2! zx#eqm{7q(BtF@cV&+u$f?s-$M>n3)k1R@IM5to%fH3&t>L)$p8ddP0aE?BiQ_?e=D^w-3C*06H*N*zEI}JKMFmBD*wc#=AzHA(Gl)I* zZQe+FW#3{U=$VX;G@GzYWOE%04%%`W4=6qXPOWnRP^lzj**T%x5t?vge3(7j(!6C# z4F*P#g1hQjp0M-$X`5z`%JKw6ar&TJ7SixCL$O6Q{ib+=JhG`Y9c{`%8>>89k@hfN z0wx7d7NQ}Hr*Mf^fShR!*>suBPY9klWCC_kGQJbip(53Q>iMbrLF~wBv<$We5Mn#P zr{%N2tQA)SUuilyD8p*Jwv}Z2T4P_H7x0*zd5SYB<6|rS^`$`@u~E50VGYfc^Md8F ziyJ@92cE#J-%a!H&iYF0+G0`~kPL|=az2IWX3&?JU8-4{l>p*sPy*;LZxd~sTNlJf zy{O7%7#?vk_hZp2jp?VtA3N!}w4h_r44mVSA3wuz9pFm!z}+M^)}r*3WBaQr%yMfXFVQ_T+$?yL%AWo;Yb*9C0?3ivkfE_-gJkQbfZzP<_dzGd`t2N3=BXcA5y&# zmdf;i4OhUq(X4j%pt#oZ)_V3`C(WA(2p3}$B=9dN`cECocva>BCI9qRa-OEBBmr|Z zGoq*&%W;nBx~pDy*N``f9J>?zwX)jq4QshtxoIR-kGkw+Z>0Apd|3Gjv}#i4LvDHf zJfVsrXZb|O6%T3hqRcPr3|8*WN9bHFxsqw(`@Y}i3aS8L%2&j^qgjc#n@BSTzc-oU zsib`lPwV>KV1VCKR0YNW%3B^|wAgq!@DpBoUJT=p-O$LVdX`}w)w6!%Y*i=YN$x0K z>HkO7SB6E|b!{V!v`7veqI7qsf)YxnS&Qc=98s3GwBlgC~v=Q>TDMES;7B`DgPPsO_GX18b zU~HPw2Xe~kL8AgsAVrtc&KB9IBMVPPQZNdoH((={?JZU4f>{j0VZKcXkEs87@H~o9 zLC7)bQH;J%c*WPDS9>Ir5$2oVPaVTxgK5FKHw&|}&W2FwAFkkia#slPp4x>9hAZ&O z?N7#?jx#7(F*}BP)DMibJOkN~`WvN}6Q~&4f zSEo(`XFSOVe-)!Tv+BzCULbfsPUoB%y9u1kn7jv!5Wg&O3lBNXL*!qeIPwx$wz^EQ zG{x3d0woJxYbW~*{?@c$SZBDheP>x}hF_Wi96JyIizIuK92c_Uy|;75nd49w*E-B5 zH-%_R0`jw8d8ItU$|HLRmQoDKTXQBM)O&lL_T+zF-3+qz9V9(DB_ZIHpL>}-dXO9= z0@rWDyjRNW5{nzA!HF#QUM&^#=X3+Eu#9k_z141fW}jchP|iQ)>7_Yz%`590TO2+K zhi-q_Qx<*##&}hpyr5q|q?Hn@Px-&wqam9qO`+Z;^E_r@dyp*225D<90%;41D3}qh zmEp48Xh-xn{TGTL?{3F|^B`e`m9Qb`3#%?;FZFuAVDK6^mqv5$hPMFJ5O%NlMX1Wb zIW#nN2SqS`lN~x?IP$ln@L1~Mx4^-0PtR!zsse`ndRm4$)mFdYb25GCzyp>u05mc` zHIba1j3541eL>KeV4Jz-lV|5*ZHL-8t}zsB>trdf! 
z;nkj>iBtsP5Y!B0v*r>gf>~;8q&xRqSGmrd)Cq~EBgE`PzNA< zYxMt10lLR$Z95t3ss0%d*niOrR9SkI$EvG=r2UQ=8p?oOoad^F;qD(_DiTL^78^{) z^~;JD+0;phX4#!PZ+sj0b-0Jw;&8?H#v#K^Rx2LfdEO{H1hFw!t+FBt4`OS6WhAIo zMk>;5HZOA5>^(ECn=9IJe3Vuc`DWp_EQvwv1a*evhg#e(5aPdTY?LB^LrUVf{8$_Q z)f8S{QW`H=*8PyWom!e>f1>R#W;(h%oD!Ow!c|3`I7q8wtf(tCT21-VnRO}Tpi6tv z zAAWY0Y&+VV>XB(L-h^#&FzEAsdzyt#iG6IF1;I$^09B%(QFFmpn{9O4P#2zwnRD|8 zjeA>D$Gqkp*Y#Y`+TBO5cynjZm48dvNj5&U&H`gOCe0zBI?i0nx;{hNp3@-99`PY* zMDIjvba!AAJ$@8eVyO|@wUu}Ruj{~Hx*h@%W6PbnUqeW3l3esh`FWwI9?OAfU3-Ef zYLWR%YIQ#5XGIzYN%%e$w%{>T-P&g-9Z-_pP7C)u|4{&1%IRjQ^OG6*VyJ;JqYRP| z)L{sa6OqPuzP9Qpqa0S%vXr8O7Y<5k>PQO~y8i7c2e#?P$MTJ;hN+b}?5O1^~{uAuj=CXla z>eHKFanRM87<gGl;#p7BjWtyU-S)sEch%iGA4jPhB3^OT9tKdodXCJKasLbGCC zb}$P{_IBCyl5n$l5kvB=Y~H=!@mlq@(RV~Zmb3LA6QigLe<`4)+2;_pcG zdJPRk&c8;zWYsMP`Z7eXaDCY+Rg?>4-EB2AyJ;gx;$$l|2(>RESL_1^tV>WE_2ZVp z9Hv={PR8zp41mYQGL`TOz+rgrR)|zxgFGbOB@a71+r{DDpj6lb3)1 zu<19471p8${SwJDXuDAgyt}_pAx%BO%2OGNow_N{cJ$r8rf)#4FyGc z(nxbTNc@!ezkw;}T-L2C;XB#eTSbF^(Vn~%5PTr$&HzNsF_|uePG(yK+Q#~QD&Jr6qF|`b9&bP_A<&sq}>7xV-XvQ>SP9e-UstMK*xj z2Zgu^ghO>>*c(Tgo4%QyT7-$Yje@fO<-s&wZvjE1CsG=;iEHD@C)wM=mP2uKgzB6J z5Cs#KVM7rR8aXZf@yR~_oWl_Ge=@gMDZp(M^I8CjTLhPMqq5U%3~&Cd{e0bcM8P6~ z8S1w@v|u&BXYs;j+uY$4sj`|dIo%GM(|GFzMZj5>*kSu8 zg)<;9b!*9)71NGp;W*1DSXKp4H$A z60hYZEIOM$=E)*aYePYgRz-eKfofaHgXfuUcSdWbNckmoZY+`m zRmZwgK@;oYu54g{g*M<_-N=Nxogy%Aq9f!Jv-ts;@!9+R@2oZm?b%6tEC@hZP_boe z_AQ?f^9j1gZ=dCq-sMs0Q{p~;jKz=QfYF!4A;=qwBCeE-1gWzF6kQ#b(PSitSL{-> z_vtosHT9oYXjekLs5p2R9)SU>SqC9d7+=8lUwD^nB^(PTs6Z_XeEMXo3eKTdx3 z+W}$BcG>i_AP}47vlbb;v~nFP38xdrd%7<@Uhvm`W+%(v3FVxtXIcE~j6Rga z^b1~Ka;Zme*RTqRvMQ%=cdrDwmY#r2@o8_kGT$=Zhq;3$;UK_*Z;mI~$ffpmE|!f9 zQKAza!T^#R*;<2C#O6329%TQsb`_Lvw?{k31h@S0yTBvPo*wqOpm7F&Pc0puWw+<9 z-u9A3zE>+z!o(o3cXewa%dB%d?Y!TB{z#3l)2|?;eP4mag-yq4ptfCi?kQ5!d+`n3Ch_SPbq}DxkfWv&iiG3ifuwvm#j#3mky3; zj;e#TN2ceeSQlygy|W?(wq7dgT56`-eWfy4e?b}0R<-+#eH(G4e+lGBH6C^&BK{+e zZT-?U{tPat#)m3^wzEN=deddCOf)+CVU>Kpd(C9gGgooCbp__8s6V)n}uZ1f1pb0lMCV)O@{AtpN1 
z8Z)!qFwZ&ji2k>YnmxYxx@O)3P|^>ImeTv?LZOOc29_+Jegi*FIS8~|y*%9V7`G?5D8@u_eCYEMGe;ho}mx>X%Z#Zv)c|eOS4M z#}>U)4f3B&K;GkaJ1<;k{W9gooOx?rnc*t((xW@sKFXkaecpSmt-o$DwB_cDgmtb2X<>SPQ*;~Y57F)^1|AkJ4NML#qsHT-gk#F=lv)u0FmYgF+vMr^A00t{Qh_DnZ*FV-wU4?X%QKs`3O2>iYAxN z))Ac4?AzM{4_j2>QtaDOBmK(KHs?`1K^LPzCr`H3lYRylR^vl=2W*R?x!ma9>8yzq zG14dPyZHP0IjlF27`TWqhZS!#tGzo2(~sE3LxDfWNdKlM(2IEIFP%%1UW9%M(;usn%$UV-z++PK~i zx>h1=#RuALispLNu7L3SKT!DCE^FR7MulT(MI06Pc#k6lqy@`?D-fP(jbna(m*HPz zLx{wo_N>Q~OpJ?(NIs_gS=EVnPLTKfuC!2!t?vchQhX8$JMXTe*wSoNDgyUEFn`TO zxWeV^O1rYv%?2fHDKIPC(f6BD&i5Q&Zk0^{E8?z$teZPG~`o1>whxF$=;-7#=t33V;IeF;HdO8eP65|5(=%6|uwEp>y z17TF$+Q4Tq!W6@$SoUPW3^ZNpZ%$V%zP`v?!Qt|?0B{=4a;@~B&EZ+x8EF}VVlUNm zfvI;VBMDnn=f2y!10)>2?>2}`ncUtLRH5kE&bG}Bbom@+fy7W&LHGI1wDAx2*@ey5 zR_Z0m7Bij;Pc;|O)?^8i_3zo<)M=qA>uXeD$}^ink*`ioc6CNQ9m7xibEW9|^h}NG z3dP2Xf%S~bkQjQG1H^+X@F&1F{)2bA% zu})ev1*sWQJqNtPq&KCDaZ_P$_PKB?nhCL_)Ku9B*@=oz682u~lUKlu9tE*?eb|2e z{# zlh}>dJ1wk%Sm%!--5K8mDp!4T)d!B@=eDocb+^d1;A+UYKF^=e*Y13zsqfewUitJC zLDfMEJUbzAmlUazQpk4q*aEsFU*5EUwHbux`XC-`SQ=MsS)(=t8H(w%Lq2ju$ zjOX1g($M={P{)@l98*uONHQAJNk|UpP*PU6gaHvhWZ(?_A=a1^i+G%wm@OAd8iHA5 z#KLGqwt4Y7orrbbfriK*A3Z9gftO8f?mOlaK#<6C#=(}pitzXnl}mgSE7ti5zJu;w z2ojk#qj_QEE1T&25%H!LL}XYM@blzwT~RL;f(lv2rc&MS4=fwFTrivET86=#x{_$9jLsXcUvd(x?pe-a`tL)(z(zVtlD7 zC_9<&k+GO2;#n-)8DL(c%dsI3Esg)=jB-|CAaA>eJ~T+S33os!r@P0nR;b;U=r`$3 zTn;O;`Zq_vbT3nQu6F>5rSk}fH-g~xw{5K?-sp>I?@nQ@=X9qPViw)Pcd-7C;qIXY zAXpf_!r>_T-z|OGvsi;s#hdraX2tYdS$1bz6IwmJjTUssFb&brY{P?bWMy`}wkO6Z zeXcuu!R&?4qJI76{Brko3;ab`Y0su}m`5XiI7cRZzGP2zjA&OZTNm*5G4~9Sf9|<$ z`;{AiF3mfQmkL>o0OE@^{K8H;_Yoh)pQ7U4W$4jc_S#WgO?`A@>A^g9zLJ$Tg^ZRlJxCT%g0qIy`L0xkAHIA+w)7)HB3QiHCkoftK7uN zk;@-CE~lLZxQ&AsndVtLK`v`Uu6ZRGYfY-1=!Zn#Qp4F7fE!S(otFT{q5_D7K~rV! 
z#;0btE|f_?E7(Y*0s7tS&gLby5pJR9p??13uDO~3-P_lWR#+YHH4a?ciW-NZI+qp1 zul0EF1YkN^veyvnYna#Dn>{ur5YZnLmwL=57Jc||8t>ILrDS>V%PsX8lrssOT&hzw4x2AO`B-@y#88tcc%{p_u9uLcST9wV4p& z(lNv&pS75H^>y1&G=J;9*q~k_6X>@Em=Z7j#&6WS3ZT^-E>E}T5l<&Rqgn&EygOx% z%nM|lko>dvHjLp7UCTkRGEKzMw>+O~wng`E^o_K&Czq&KniOpr-UMjAUBb~9ER++&9A2+puu)RMAr0Tzb>>0EDeh;31L5o1hC zEt&aIbA(?nY=)^0%vQY^N_tDU&ho71_0qDefJ)6{u^hg)zWBpee3O5Ol zf0!`bji$oVm26yytf#zHUmnSw@!(c8CSZpya(8AvxwEa~1a}Ct2(zz!q3=}Iou!cd>Mv*3);gPe zFMD@^(Ec8TP4wbMC-Pa-*f_Z)Xf;ugQ}rblI0`&?sXV}DD7g%J5(O-&!1p@%fqMVj z>hi)Y8kZqx$pX%|l!^O8o7?&g(5~Nqs+No+3~xf@@VxU9_B;92%4Zp#GRi+NYhhOZ z%8guayQ-{W*J$#t^o&Vc9!(hL0JN{FFkSoDSd0;Raq_vF6YSg8d|+})8y0QMbo6Ad z00T0gg?lkoZCvORsg)^HLeteJUV>D)^L!y<6z>CaFrFQ z!W7JftB;K4toJ1ndWA3t0b+YqO-tba5;&|#0dc=x1sYDOsT5%*d$%XWy5x{Gu=|bQ zYz-{8BK5GED-P;+)+>z9(0dz;<%o=$he=~&BJJC;A_dV5ixZ>MfJ0t%6$bp`96whe zwntLKtLy%Zyu??M6ME@ltXMjxxD|}q1$+p#l54l-`;uuRaKqlC^D*m^#C!IF-I&q= z6h})ISkM!)KS?~oLr!mad8DT4-f%Z>u#6Y*Q#=wou57)0Uwk*ti5^bf>{R_z0Bs`K zQRmaS8RvDv=Em-T2;ZI#>ZQ*cuRpVU6eCYDQ@El&FgkA)6zm-Xh$xPtK&rV|xTXB} z6xRdK+J4%dU35TGLiC%r0rso1LTyXqELLpd6a0B7<<4}qGI0Wi1MT-#xrZ=s#|f;( z5-|Wp7+{OQ9G)oyN8{edmOzBhtO1%VzeW|oIXIPF#XJ3Fy$>yLrXy07o=UeTi#vK{ zNt@sJqUz+bBRC}liE(LwAX-I#;Ba4X+kEjU@SnQNXz_-erbauNtN*^Y5*4Bx+qrub zovs9@-mfLai>K>+^QC9jPVeuf9c&(sY+46BIGRN;@6rq!6*p~x-5&e1asNxia4ZI~~GKW+&pu8*f3(%ExL zoKoC7n>skkIistj{=U#reu`+ZaeWfIQX+sOcUV?Ne+{Fusjx=|Hv*>;CZ^y{_NO9@ zejZI^iZC6>k}7|AKL0`$`I+GY!n;d!S)z)P?}oY@g$pRbsz>369tiqxgndWP|SB zlZittgy&E>cDXjiU(gji=aL6%*_jUAG#?XJSNd)z;uBb3zOWqI9* z#u-PUKL*f??kbkdlxq2FlBIXX-&bmhjf1EUjFqs4OGX6L*&QG@=cr1P*o^A!&e$nU zIxzn#m>7*%HEo-Ai*}a$C?@brHC*jUgR7XSxA!8xA=06vC$<8DZ{!VHmE+#llrEon z{r+&=LfjHey*KEy`0GmHrf0Z6n#O5r@=$0zziNPjcD=YvIj5!wyg=r)BU0%D%pEQY-UK@!(rUmLmr;na3&^Q-v%^&W+hIu*uviT&U$`S9sH zl7eL%S_>h|@Cx1D0eK&rSL`x55F7+T=LkRflzbJY8npC1w^`t)3>8+~@9iot*J|en z@^&PUx7l0WBbF`^L-A_m+u>v=0_x=s5Ced#heY;7HLjSB;2H!~X5_EuRMsUjl$>zf z0%s$iRWOZtdxHg>-waa0oXh4A?I8*2N3bRuc!1BLhb;;!yL>ULr-zlASNa5S&!74M 
z&Fs320n0pb?M02N)zhuS^nX6#EtiI$sLz~u?P|gth~u#_4+5ADI!fMfBa^qKG%U02 zcmK8&`_rsA&E-f8#8kGYm>NFkvOb>+5^Y;3^KYuQNoi{C>GfR$mqj-HJaltNkc4!Q9fAz2!VJ`dcVyKIMKn=%EClzWI$z-9m|wmPfLPEB*Jx2RC^)HC zXBR`lo;j=o!fXDFjZ*ozGIb+^I{s{*tuU8l#{vkjhfOmK3vYb}e8n_fw0PxGy{kNo z(cD^W)0)f9k<=N3easl+_xYN>iq!0GX5%znm4pSifj5Gn+O3|oi|-cL_Acl?ML$tj ztrh3ijA;oIkb*CXBrg&Vvgs^eIaT#7tHC;woSTpwAR=o3Dk7TKK5G01XWuQc6cqKV zXh-pRD|qpcUM{SREy9hN+{c0_=Zdhhc0dXq;CC?aFx5fnsHnyZF%!bl&BE-=>$_8Y zH?3eYsB1ih&ud8kUQ1oG1Sq6`Uu4nEYzt%kkpnFK67e1XsRJk;Tqn8kXQRK8fw=b7 zk;$TC3ZL|dcTNN{`ca8Qb75+KZ}?Z_&77YF`co8 zEfo)RC%j8Xk&VaL8T5zSX#Xx$5pbrII^^XBqVlhqA_v*& zWFStz8v0v0WquYZ2CK-sQ|h0>p|9BBDx_bd;RT*nP96ByEWMMO2ov%D5- z_|{Zdwtjh0+Q`1Y7E=Z_YS&i@Z94KNP*%bjNFySVZE??avdL-gMG2om9sV?YvL<2` zqWkT%&_-ib#A-coe-h4{;&XfY>BXicJmmCf=%yI!p@e2tB(GE;x&YKiGMi zYEz3F3(=%kzZ43{1wi)?nT5Ap4&bC$<(oDyWjYN_5@#UmiH_JuXZ?00|7lVj7p)Nb zd2=K)qq(!r`}GpdA6r^JYMUXJR};FYr>{_4WS0RDb!r}jD>n8I7$WT=-T&C4P5`9u z_3|N+UB3P+pkjIMYdE<=j+Kogg8$2-=4cyeqQ*2`4gztt_ZBc}L5QMxioST2{vP_h z0Kv=$>qyVq0}y$nqp9$ZV{XHnC;i})*7xhNTA1|SKyA8G9e72<73^R|ufFpBCIV;~ z@;QX4)8Pp(jqI`ZXOnm(2UMXMPrP}hpX~U*WDezfR;akJyybf1H9-3dB$@r_)E;`} zaL{GyA|#MPN)~4s%R7rs1zEhyY{yu#_}k=Hv7<2CzZ4D{*FI*9mU-jLZns4duye}6 zdEXGz5!eA)ksR1)YzAMHA{}Qwe0@FrtbfER@F2^3Tk7&h36J<=3?MG@{m5Kil59NS zo;~p%(bo#zD87(%I%Yq`D*m80r*NqPRLJX^3({T0d8Za<0i1BtWk}iZ#0tcqO&t!w zt}g`XU5u_cfm~If-fgze0HvYZEn-2hShzkA<3{NG>F3Yeid@q?5~-rIwz*@|ZI}O| zvN=xn+;Gnz{hI?UCsjj3PsnC@-Y6To?eBDwx8t@Se}Wp`gHfs^;!M z@?Bj>ue+ofT`*df9i_~m_gWZO!^@n%C$OhDyJT5nU6F|VZMm+9p&DP6L#8Rwyd>8| zCywcK;})Jy%Ymf#qG#3;iMW^6nq*$3yGqhSd7_~UI)1>3RQxcrxNV4l4sR44;g1PQggklBlMb0%FIO?## z8*8iw1H)3Twttnj`6T#n`;^u|2?t6~VWGi$_*eM=u4Hm!!zL&+Hw>`852CyOlovhA zKs;RvL($lK*2mU={YsqHFbH)sG435;evb2i`2id|{GXPk$<6Q#=2HFpVf?($j}B2` zWM=Epq_VvT*XRD8i=3Ri6rV?OC!lu|-tc1CFRe~_BYa=-rPy0`tn29=e(UjqVG<)0AB6L=T z$&acgl&G`~djAG%n8aG4KQid=`zJrT+$e91?z4Dc(luKF6?XCq!^(REf$1C^ zWqx;j$JnawWk{*g=g-r34CDcF%8v4hN5siB`RQwhn{I=AG~Kpw#MN4QsPh0? 
z-i}}3ST3>&d8UkQ^2a^8Xx3xcerS4SVaX!t}SIvqDSq|%j;(TE}CqorU}u<&hm3ihN>7u%D$ zUK61#y>*38paZmm9odpf)35J;_w})apxQip3AR@~{`X)O0vuJcFlm6}^~{>@Ox^Tq z8csN4XCU21XHigAa2%-q#^F@DYDs80g*nco8!1KRLPwE?0CI)hgp0ntv>37mjhp2n zS;8E~Faai6QfJ@z5~9oU=JFfc+d7*-o(aSg_e**FQAJ(vp5%5}=7$`Kj4%vBPqp^;#)sC_G@Z8P%((PY{*jLZI@>VooL2FsDSDPMX zhwESU5d72QoLZ%5Ly>${+P*Add>jBq_z&2NSJbP8fs{bzI~gB`(>Q z$PmCj|2j)Die(JRvctl#!@(VXeKOTRWh8?+cBgwW!lsS;C_2B-drLQ9J~JXq;d;zT z?MRm+@6~n(Xs|n(C+<&(;34k;2Ifx){N~@@`$_~tj@3!Q%fu_R!DTgl$BkFMbC{%3 zcy5)txlfVcygkFY_k{Ja!N|jK(m$9+l6aarvOt+jBCA2E6n*R4vt4pZeddkW7IC9ZwhTdAd;{Q%Mn7r;wAOvT`=a&M9 zLy_^_(3Ep=(j~6dZuhAN-|`xsSJ7o38&c2+1y}~!kT^MHo4J@um6jcyulBPL;*n?Z zkg6EDDdVfu)=#8X!59H2ur;_&1a(5o1+ zM?Ot+B;n)P9b@atD6C~O4N+}l_Dh7dNDRGpFSgO>&@H78@T!pHu;zNT$U46~^eF?D z-D`hEzb(hr5gKdyT9 znj%_oI(SY{IML)NlGi%^<_6K10vfd&w;=beZ26mZy&TtW{5?s7W5Ui~uL5?9>k#`R znwUf3jzHLdG0!c4d1~|bk2%}a%KeHA5I%XJhBQ7CHk=iv>*3uXM6i}rtekR7ERoLT z+zkeQ21o-0MRUgsmDF6m4^l`a>+^oy=A62lwxTch`e!j+&2BOb9;Q9G%j^^X#6AE+ zK=;kW4Z+l-rJjpgf((sPbtjin->O2MBSYa6Z-CO+mja*O#}z^kf?ehv4Hao!d=a3W zJsh(DYJ3uNeS@frpZ9y?f7P}-G_1u>^pmv3he|Yw97Zu-DiO}K-3=AZYRf30SA?x! 
zMwJ3c^s$jb*3>;pC3lDJ$)1yuft;f>b>DsiSo0J}pc67yRHY!sr~Y>!aqrWzP0R+K0DmQ|0+?A$<>)^NwI_gfx z3V+M&9ID9grJ}Q^1n&L=*i6b?SI1iNUCbPlewWUPTtL6PHE29GMm0FlVx@bWc|v)V zsqyuM`W;sXe#KA9pQ|*!z1T)m%pLDCFp6z>F^tq`1{r_ssmWNCDcpn+ai$p#a~t92 z3mL&2B((tMv7@g{6E%NtS$Z#HAJK83j>ep91zzlbF1GPZ+Jk`FZvBzU2wY3gUxqX$ z4%vc~ojZ!yUK$G!XfdWrlh8tAKS+vD%Q}3LVoAhu>;f?u2aV~a8NddE#p}KV+G+99e%F23lg7>jN~MY^8%wK0EQ!6x5Xpd}CUp3^z~59N)m2^30o0E1Md!`u$Bt@BH~uJvd3-y84V+tayPGW21LC+_lKnrb#nVY zMcFjdo-7-2XV#(z>#n9y9#*Uf-H!ihgWxQQdB$oFTR%&;v!fms~&U^Pzpe&M4*ufEPhe3OigY9l5fGn%mM?6S$$ z6o?aBQ-goUPAvo}na5~_^7;;vri#in>qKT-vac~a>_q;zYpfMNO#VW#37pCbZ}TwG zZDOIUh7&p@)PB4j*yY)0eMH%mqU`w6$7B;k#9)L>W22#Jq^{YK@Mz%oWeg!`48GU( zx4dy<34}c5Q=e28&@cBKSBiqN3t7gSFd(OE2wkLb=uQOZ^mQlG5O%=g?0$AQFf$#D zR-}JsutoMqKuRcaXe7PF3vUUsT*r$(9n&7zkHI4HyBzE~16yWCKE^I?S<-sL_#{g( zkb9DEM6|F&Doo61sJxBz<=AqmV%R#6qI@|^4)v2~MQe6Xi}VfQY#Z+kljJx47THXX zq`#l7JZEOSsWDkTyl#9Jj-Ce@oOwBW#c7G~N#9@l!OO8*eGPTv+v(|#)}7=>oun$e z*N*7-PRy$0v?XN6#)JJ;L?>K6MfBq}?qiH1j$vO;Ht3ezzfy=)VTP(SY5DZLrIrtV zUmnP-?G^?)sb`o+KYf;redI0WU<*h;!F~<{;`j-QastNRVmSivYD0sc>f4j0@V8}Tjulm6 zi7)=~BqF-)i%n-PHNG`KuP_u?=BBi3r0T~X?yizivwEJ}+JLL0^th}HEJ_(z)YT_w z>^5p`!BY5}P~HWH!Yk-})w9C$q7skJtp3oe`Tas668x2)i*blK#Z%&25QDJ~H}5sg znEAJ|?VL#iJY)zseG+-bOF!Y!<1IM%li9hqZ;zO36b+R6d zwRMPgmgO_#32O$1y8Lww(Ch|U{k-NeNu+<|w;Q}$axeN;=HN8pFduF&_2X_|ROta? 
z{a`)uDBRZ7#~ip4Kxfj3xL1XM zF+G%t@KBJF#h!Sc5RWE1sK$$1_Y!e9bdnK9TjG02 zc(~7d-yq5r#2YKj^~Kwik!}g0-<}%%68-eCo65Q)>5`inm`B#eI5cIT;^^x7B{-z< zu(yrsP%Gf2jNWg?gRy=h^mBfQz^uXaT%aQ+ma9V}+Q$p^T+1cT=K2tYmmgbawl^_6 z}(j?$ql7UklzztV@^X zw`Kz`**`K1A_V$4VMPrqO~zs~8VBxO!#_66Y1|cgquv#*XWYGGBRxcyR{*9z@A`zN zk2GMbX=h1w{Ti`HatrQNS@9T>Eg<1jO3C~MgbuT}sQ<^}7Q#TCzV$j~m^=imaO#LE zv0JT+s8T$(j1~KlR!J=Ikiywqv$fKM*nmIqMc_CGvQMq<>&82^~t2FaueVavL&e*=WUOj;;MMeHQl-5=H z$Nj{K(~vij-G=?U)#$8`leE7Xq{;nNwx!;DJ&7}uDDeyy*p@@+V$xL&AaRi%#E3ql zy$T7!hK^v6>Y&5T!Nkz) zG3i6=jmhl!lJE?mp0Eh7ScD#B-Wu$Y{KsbrS8@S8cEJe(e;Wm%e5bu!1tE4=z9$rcF0I+K* zY}&9Wl3XkP19Yi{?`S&^W)~pL#Wq;yen}shQw9>zJodd;)d67OHH2JBgmRG>W2cer zC1yU_A)%on4QM#okOd^PF)uwq%cNn}6@?a*3LPj!OwPWAK9JfpUujnV9P(E!Ls3cH zi%9{;S8j&rH{W@st)b7N z4JKt@A=r8#9Raaya|YYMxmD2}9dSgC!>^#Rqw4MxqnOP$cBtz(DbWBn2v`g#-K$$; zffQ&E>TUpXY6qO@#jY=dYXi+|Kb%*{3wGN>FC3%3K#(^_g`123;f6WMa8^j=v!JM{ zz?jVjxczpBo1S2dMkD;9tX){a1eyW`En$Bf2g0*II7Hg>um+stc{5GBS-F-^pCcD+}lQ8 za^5*Ar@-_bnTgbP&L}3km%uZhu(2LzHov(<;f@EJ8>t^=EY6zs$sH5;j|7#VU=~cY zi{BzZxqe)Q?al`_7*T(MV<9Dx_589JOoCO+iGfg|33k*uaqcnOUk8Xi^D0MAcjeeo z*mc%;1Xftn_j7uR48}e!V16SjfQn-A;<$_kQUrFe*Vi@R5!$7_S~N>AS~tHp-E4oOda^EL_S!IVj$aJqVZg?LR<279ah9_7B~OL14)qeQ zCxlsaKs+FT%QupN?ezEncZ&D%+&0-c)X!}z1BBd6SF+=hg5*4m_3C8BDbUZYnq=+E zFJ(V124><^Y5P4_KzsaeGk2nHW^G^0mb6Dur*Vb6S(TD2#k!m@wbLXo56>~`nhpEA0ypRfB?# zy1U}%t;BYsJgLbzB)vuezW_N{M^%a6zee~j3r)fXd_H4lv z4?x#F=F?z}p^|c03iE|o&+e}Rew(q6@Hm%|&oPU^l3jpM?&15{fQ8EN=A}0c2NK@b`tK?Ee^aY}W4@}&?#Fsy&ipC23V1n_(8o%#+*{h3sv%P;+g-UTc zMS7zn;159+b%8nOQ3wAxd`LLXx=%EsE3D261vf*=uodx2e@M)9#PmwKMXSEmm_pcgR75K>uaEzs z1}_ORg~sx;2C9%}nQDB*&~1!5X%~3KC>Ncvj`<{mzHyOmAnB24^gT> z4uX*t;`gH^zcpqxSD8k+eqZCN68DD^;?76G$|o5D6<8-hP5S>8E(*~3WSVWfo=0g; zuxra{?R_`52KHfIuK8)sa&!l1Q%OonLdjN-9cyf;j-YlU7$Q@!AAtJwp7TfrO{53Q zB8xQYJo0&)E}giS)s8;tivT72T@8OgnoS=<@D{mh!A2ol!K?GIkf~;mE_%J zM_Z2iKVNEr?^NZDI9Mn!SCoQYG;kbk5xFknCLrPLKeKeRfR(2NOBdTDt&Um|O;CV6 zZ5{1C@L|fa>VscqbWL3L4i@qdEnV#P*fB!PttG#h7ArpK)0JG<>48fkf`LmS+M^B! 
zSNjsRLX$8tTx}80&zI*d{WhJ7Dbj%~i3sE|;-g8n5aH|0o?x+ee3-7R+Th-bzQ9qR z+x4@_@~1hr8OubS2iYd)KcFsj$vFlusC|9H4)<`bSLGA6G1$t!)AlZUyDMNo0GJ8u zjDAae{OI$5AemT#b{qq^|NlVoFph{g_MwstQFvP)qw_gZ^>oSo_p^159%oU_Zj-J* z>fB@j6;%-2rd;4Mic7A5|3}taM`hKm-NPav-O{0ybVzrH3ew$;bazTBsVE>wBi-HI z-QC^Y{jJ;QIp;mU@%`Z#i~(bN-+Qlpt$EEkuepGePy!5W7(0eQWm_b?vI&H#!`WHl z?y+)kd^B!f^bn~-eGe&?e?YOMnD`(QVp6{$oDfTl9K|pr5p{+=zKoI_&} z{$=W%O~`AaIPKw`Sc|CCpu$(>zx2%cBg?C-(w*>!A-#X{c=j+%sY$rmtW2h&P@GS` zv+xx?ZJLyoV>^3;T#gj&(lO@8)ur;!+M|iY#(&)9C1{6?19+y1xrz7O_1UdYtj%^L zNbQ11Lm#__PCHtX(cp;4Z5fb6+YD~$YtsHMuxrje9Xzhm#~S>Pe zu}U$x>_Zc5-~>M)OJ1hba*DVcRv}5M03PN01fxKRqX~2Ubn(&{$~#N3NLk#=zSJ(%`}`SQ)SP*FVO9{5C&YUvoBxJFnWA0B)a=@&ZMP7y<@TKAM{W#pX1j;vMXbFDKD8dtDk%~W>Et<-6)*I?|VEa+e= zqNHhSuZAUwOuC5H5Mb9`sAHv~tXCbxoZd|XGNN;*aOU0%CXq|bBvg29 zjpLeL6o_foD69~H)?TXb*kD)AZ&da z-6w+FuYcW^jI~XDnJWfi4j-oy{VBCAg?%=G&2oJI^wwt~qJaFuwDBAqbia#16+6z3 zoE%S|ou?zOx*&zSORKpfLhO|O*vnD`*Y_tg(eiuYMZrAx=rQIsUyL)o6(z^#!Nt9J`q~Jx{=hvs54L^C5x|2vEo^8H-73y{pj&oQ~l8@$Z@$)l!Vd@oo z-Z8TB04<+rL2c)h@XX62i4{BsbrBA_pMD!V&Y66JKo;AzqqQ!tqy#>od2oO z?!0Lmw))Q<(gjd@qDQX8iPyQlkjdPve-cI#@)oPTOuHfk-SMK%tDDY8(`0pyx<(wQ z=?*-qKK^MlPFdFWwjYIUb@Kc-b0=k!hSa=%aca_%uug?GjtA-K)3RaXnQ?0S$9g{Z4Z|W@oUFYgA8Q&J;n~Hu> zq;~8Re4aR(!zK{#OuecE;j;Xb9W69YXdWyKgJlEdHI?wPN*I24{E%h0>|fRh6)79} zGm~}RX@bL;ksB8%m7`u2i`V`uGU^r}+E@c9g0k$k^m>zJg^V0F0tloMlZz7lw%JSq zZJ(>Uu2e4EF$=>&3O6kn_>f5R_&Fea8_yb-{=@DCbJ;y z>Q4->QQUrqH@w6m$zxOHX(*Y4Empd%O4<`MfT@rEw@X1T-40@wl^34t(tN}s!Arek^YeiW#~=%59A9VfV}N<@wG zLaq!jAmeh5c7jRsHd6%qfy>d(3%)_NuavwGrX;ZSqiV}HPX4&^8er`ARXQ;j*?2$U9E=s3Dy@?115%O|9-ixRi zdlQCAMfs;@gy#{s|7`JV*Fz0MbImoT=Q?u@?y;(Ogs z^MEJltrCOIXaz-+ZV=)G_ z^h(A#6dzOc7IKw$m3kY_$FhE75gtTi7B^UBLRqW+%<1Zp{RBbSUrY?7yG^H`*E}vdFAMO8%{cv3}d?kwYWiv z?ES0h}nv-t21!XM+{%n+@in z#47e$ZqvB5rc~Qniz)Yz3P#0YR8)9Sj@24iq5dj77;nJcM{j*m zOXCB`gN)@T{$%AVMikW4QsW5G6x85lqslJ2B`kwJWsc{;pIUnMT9z=l#PF+*pHG0o z$OZTVT-5LTtK?t$1yt&qCmk2ar`tPY(yiDW3YQ^M4{o3|O3Jq|d9yJRNn)z|4W{3K4HpAVT 
zJF;d_bba%vad1r$Ya4Ob60Enl_ClQP7SgM%5XnMDI2fJVPsGCw#?S{xMp3r(4*K8o zT!D<%@DA>KQshM)J*Saov)K~K0hI>7tXf>mW2K~(RO@3#G#u>?8I!$V3T2un^JupV zZW>~J=sy`!m1*R1@vSMTl`Ex%b5vOHytD3zlI6RPu;eNop*J>KYtyL?IChYlpfhwi ze_{kjU&ysp^_}OiNa(ukrf$5F7m91@#2Z5pt`6Ca&dWYTsjYFMwf4|X6oN* zG>NJ>;czF48e89~Q72a=JB{q{#BeyMA~qb?jf*D!C$jb0!$2)k%a4`G*r(wY<^QvC z*B~pmeuQi6tFod1{UiA(gZ8hrmWr#?jnc<%7w0xW+y{9v>@tTH;Wm|$!7FVZfEA4T zlW|TeA#m1b66QYS+*sGLHpybmiD?s2cw0UAJpX!HQyYhB4E)fM!>D*%5}JUvzm02oY30}s*wqV9+Ud1K2Mpq1mbmrj z$KCN=C8R3dUcedjx}i)Ot>9 zEk-2=-zOG3c830F>WgT2$B=%~6ghLvZHU*G@KaIl!zwqYxRV>2n(lv_Zq@TbL&d#O z;HI)SA}8fSviL&{uCF$GbL}-VYqWyX#-|&}V!7L|L1g(DzwX5@sBR=9EBkft`VtbD zH~uQ)*b@!LT^Pz7om^zQw(YD^jyHa&bSC-6Ytdzw>9TOI-gI5nM=t!v1{6@x8K8Yv zJk9YOOg(*r*4UAbEOu(IO5iU#%_mOE8fj6a09m;;HNboMJTULYb48TSJ1`oQ186b+ z=z?!4!93d~Z+{B3eMh^Q78Nb}sFn~~VV=jv`M0O|w~d_M5zRX6%~9B#t;g1=MLqHc zKI9=D--?pvGlaMfXhdaixdzLqErMU!i%!Vg^=nRIO9F0^aycKzy+ya!A&D*HQt zQ7kk^kmYSvT`ER~jKJo4kjj@R|1F=Z$6;Lzs0jTeVjb2UZ6}{9?R`TQDDvo=|Lrgj z;p&ZW^VYs77Uo|MILnu5)4px=E4LRZ-5iU{i)o-Jn*d(I2|Gt9Tqg!^p1 z++T1&==GDivH=KJSd zVFX9ojgOt91H0c8Mk}ruCCPVfHnk;Kj#XIu-`Rrd>8BZPlwy zGBKlLeyCOMh41eDkaI!;CCtc9Ydl${(0#LXMOK2|&YJ1cxTE=qU?~c zE?chXVoX#o@}c`x)S8^b0L0{U^+=u87A@FWgI`kPq_xdmFvSIB(tPGnlv;(gEi)Ca6JSKiAAY@kwjtuHGa8r#R&VG6 z`{XLj9nI4psmBIwm4;)~=mFPyXs0vO%_Kv`DVP<>KbrMxuE*(3pMH5gjopsQpl#-% zoBc}03v$WUFNvTG(($^*+xpZK;Qfm?r#s z>+B=n4;?@uEa#l|V>ulg^#|J<+Qh2Tx)zp@l9O{Jp~^-Q^2#P~zlx6R}qWkxBV7Adk184zteq6^-cVaf#~y{&RH&cOAcF3|1_)EKrGye=jrQR-x^ zXF3Oc1%n?znZSiO0=+WlK$N-RyduJ|U`u+d)$bRUJRzwCm~h-v$U7MbCgI7$dQDQ}Yu;@N#t-y^`yCg5;7Asa+`D;|2qB zftSVGNk6Q2k{mw8V?TRT=oGyUU936A1z+o49A%AyL4Z{9+A7e$2_Q^eE>?y{On&dN zy6Q@7ADX|0k<-`)+r#gz?7E96e+gqI*Idhv7qa6mKQ`d=wKZwwoN04?=-vu=TPnjP zd@S8t9w+=~)Gx$sQhi0czeeeb@cE(IFMe;udY!otI5$tz{Y$bc z$&mi}HvQOhKMd%EKHK1e@o~yYo+=xtuY~IX#SPAxmO5Y%aCxmM+`&Ih{3m& zhFfypP-&kYVQV@?ep@_E1|9)PwF%vetZP~tD4`MXJ1#h^C6&)^?5@LQ(}6a8Wsu$; zw=vb(&XqmbXg+-?771?a(@v1Xv6U{r{@CdQ_6{!9(9JTD_rkJxa8m%#u%Nwq*pH(C 
zyW5aGLp#^@49%&Tw^MGzf_}S9WVKk^x-27C@P->U1ZX~DFZ$q#dmQ6;D++nUUTKw7&5PmaLoh;vP< zgzt==*DObv`%!ECoJru<|CqEsuLb%KxK3@-o&$8kC3}vcH|B#w3ZSvFazas7Za4ZM zN!?5P#0Kcf`;|_WX>gbLUMeSM*lSE=9bp)G>7@L0q6-Jo@N+Fu=wxz5%>LMZDN#TY zZW1U8O=irnpO^IK4`rFG;G0iw1ad_e$yB$k;xnjUAx}!P5YIfUt`M3Xhp`;qf?dOY zB$BCz)N=jdmx+1BE;jc>!I#%ryV9smN=f8(bZQ8KHiLdzbi}BY<$!)Pdy~ak*#uZN zz@v@RPwpp1SiWb3Flh79EBJAEClO~6Io~X}EhF{T733-|8W@>qKAo}({v$Qhk#*;o zi;E?SV%zg^{t!{bg|WB7M6)M3b2RhVpJeala1JR%VT3%G9XUxdB$^7gaA=nDY;YsX*n@ z2~eTrN0~3gC4B3jw2RFTB;XXt9*T|TG|YRtLwt-;@*7FG2-b1S-{;K>SCTy^*Z*oV zQqGC^0h`M{Dzmc38nsZ7H$PR~*&jGL<1+x5-g{v~rPomZ$JdePLU0dp=rnRT7epcL zPk)V1ezhg1eWv^R{`Uoq-ax5vGq*-_)g`*YXL?h_l1)~^&kp6j5h!fxEu4JgdyT0k zb?RVh)C8yTd%t_lTepGiva9l(X$I{p6uvJ#zY5ejUS%1?cCHda#I;Y#rDM(E72j%X z^|IVMAWA81CD_1=MWaKAZN{hV&9UK~8B+WcC$N{JOBN*?0pnOt1=${I%VM^omG#HO z41v5{FHF zN^#>kp&k|IE`Hsw0c9H_|C;LpY@>hYsK4^;x3+NgY>V--tyuPOBp_yJ*(M2c<=_U3ILgN~&UfX>Y7W98Bj-T&b4z58>qxF$nz|oi#`&@&q^}aL= zNueO$>Cdjmbm>o2>Ipyw{scDG6eYUIFy)5Qn8+hHX==^d&rx<5_@~GIZ-mqdka|yf zKDzbkX8sjK>c1Q&sJ=qk+%65s1N%lo`3~ zw$#;>m$%QQ8#Po(g3UuIWCRF&Dn$hzC zz}@*p6r`Fo7RQn6r&z^7n;=ZLsf^U;Cz5yZ<4 z+O0@M@AtZ0V6*|rur`D!!mLgHNV`;GnS2R^r}mf5TvTMYF;1p0&0seF3d^{g8FSjI z7F2un1L`4!&Dh1K*1B0CI}`kKE_)93YZse&qCxLjW+^{&vz%m0eJN9<^YI`R^O-1g zeg{`o+W>`c^a@%t1lBbJ!3*f@CBt{@F%K)!GmO3!j?j1ul$DbD!!ehWIy<7S7{TbYV%wJ>RY<{h1Ge{Nw43+s0n+v%uqN z_Amf$io)v%_%XE-MzT6~Q1ehD#jX1-oE@2%CfA@qpZeEUbLfG9rcY>dax|<^p!`># zhWNUWd5u`0XKe;L4xvny4@hh$J(#On z`P^y)u@xOZOCiJiwS=YoQYs30?{uZy``*~&MK6NWPb}TNYE}tPgU@!1RJ(F})40hc z`=j&EcENJ(1t%qhmMYg7Y`k++z%%M`mJimbEkMOswq%m9Va^dX_7x$9py}H&O1^`_ zvdPzwp89XY{x6!gKIS3~%8_goGe0t2uF&BlmiHHf-X^|Z{CoNTKx#aU#q0zW#b%2g zo_gNA`;BK=X;Adxut}glqxpa^+$V}D{^=z+1q~r-7m;b}DL(CGk04CWjAGN9o%pBO zXKxl}136<}N*#7C101om4BhT&zV5zp5S4raKyZYB#aU+30Bzh5D4xI7lhxVF8cWqr zCWt%BHy^_JXO6=(kvnXPI&*Xtf8X++_UMOw4~@9Up8OzlcR+vIpK*C`nhSA#6$zv$ z&h-9G@Ae3yenVS1r9E6>{kXB5o^=FrkcG1Vss10|#uxdkYop?-5(teKX(O5Nl?pg8 z0JviP)A8`v_lcVb*$%t|FCGr6U5IKQ%mt9T~bhwTsP>xCD0C?8hM;86I;ZBFC(d`Sk+M1 
z1bV}5;GV1(1)bcHTpL&EQD+wpNy25k6xd~?P=ph*BJz<%qx%05&)3S- zyIV`x`pv>F^@W&=Nyav-J7{6!jM}RXosOos8%QP5T;`X#d zSNCb`jH!NuNMCXSC*U>P!AMGuySSng9XL_W+)We<{BR3p=c$09^P_Mi;m$6PZEPI8 zE5k|?{eGG8;@b{9cI~oIWt4=>>fiR;Hk-(b;!||1dT~!gze=@XnY<|r{w(FxVGQ65 zswoPx^_VHqJ!n%{Ag<@QGR zQQZw$kzC{^`df*k)R~Ygi)F0cEx@m9SI|slEPgH!nqU=mf(|yfRnsL&Lm_7IUM`hG|{0y($HsX2C>bP9ONI}7zN=Y%uz|S za^NFqXbIrBn8x~BNpVh(j+TD2yx%8=p926bW=enVv}jtQ`t5WORG;BfTZC86S5GSH z-^iL1(FGMSG=9VP>>|G|*&y>xJ~jwSS7j}BB+=pQgZ?}T!I!adO{$GvCXQx^vnKdB zgm};AJFZ3?T}B8!uAccoCeU=p3mu4N2PBQXR^SdtqflJjR_=0VzYgTv#*V7Y1Ld*q z3+IQWVdjlZx{1H26LJC?#ND;d&3`j&=o*u7xE zM}}`e?H`C{F#*Z*W9j0Q<6P3)4>nVX*MbhWDIZ`lZn*SfYtY#X|8L%3<~#0(*lI^_ zlyalrHnz5j(v@*cD})&Z*Fm~N`;0W_n9ihMY^H@;s-#AdoZlzUP|oyj19GDCFdBLl zFV?h9$~grBAL^?N9J*-n4*@mNe+Z!_3cKQDQTRF;ZAt@4-i%eYuQ|)j1O?8 zHAFIfa2G^e`gBG#8P$HEBEfE0vpygJj)fUIWen+ zqnGm*=?T{R&QM2RAx<;AHt*u*_5H5wmO@IxeS@5KMQ4VLnGYSniOE>IP00(RZ^{G( zj*Kf&T<8Nw#tlJ2kAtYy>$ zo*EK@bD~)A3?@yHGe09ow=2O!rxnJ)MyPgDa3$dYKXqbj49$Fwww7Qgf?@twmiH~% zQj;}xEc%rmZa+HCG^|ov%O6BX>>ut0m0fNbPP_R+$&O9qJL7k15xKIJBn!im3#7 z*Gi?mq`zU`f^niZjoPEd>Y~t+;501D$NBa;?5_!Iqt()}QpaDv1ZgxL6&6w1gEToKbqdbLb3Y`~3{d;mB%;-LxY{m7Qsc2y~-y-m- zy*sx5aNWKx*}28@K`?puQ%SA|H`O2QY4QH|PI28w8OZRudtV|m+Ak*QAH{C(b4qHb zcej!&UiqBBnvMsqYT(BBoWG+h;)4~2!H$^djH&%4Br7~rMRO^nS3p==# za8%_H=_v0;M8tE`L(GDb4plJ+P<8RlMf8Dz)Kau?(XMAcHN{uo29i?ou`qEM!iW2v zn(w&UqfvYZ$2ysJ&#E(&iE_wf?TRS66{l$tPK8uYtD!=_7kFX(`CbTnyeaBK7QByj z#laZ8XVS6&UA8PUkaktI#yr)=ChrSf<=FPwP}}z$`@m_XknPgN+j?F+wtQWNw;s2N z5~oYCepLosxms~r=f$Cr>S6W3x1ncS+iFswYZa(0xEsIxg>`0JI!_>v_4Fk;lLC;X zf9m-|Ez=`eDjFFUK9G#5v)r=q0(`rE<3Mhcm+mteao=$jC6csSOPy8~A2L_<{P1N3 zk00-F3o#VIh!e7$I&q7(CPrrtHLJ^p!Q1LFzXAGe#qoYWV6?z$t$})zx%_5RqjZpH zL3xwify}d@9If`gW&}G`a>s&Tn)yDd&!4d)nz2vNec1SMR&a~2iUxA?8R0s-ykYsN zEwLj>gjD(Ej{2Q&uS&&gKUs0BEbX_&Hb^ZI8Zfv{`lp%TxXYDI<9k#ytVk3R#IMBU z@2&J=40-eLxKkg9qCL)E&mxKs5|~TJil@MJ&^5kfx>lH`ebFccr<<>|rq;Ae7XEJP z`qZoFSefyt)u&=Pj!Arg{#v0d8%toTAYzsT1qi>FqzN^#OD-d 
zLJf9gJa-CF9jCEY4#=W0SB7y&@#p5q$1>_NkOv3wF5+C-o+o&H(Hj&^J2cZMjuY?8 z%Z`I}FvpRUwe=SdbN|>6IHsCnoyNFH>Q%>#9OkVdR)^SvY(`)9+_O3^T{V!%(W2E_ z5TbKdR1mU=<7gbwGYf4|oXO)J>A2#MMI}giEgPJFaNi9JP%}P2%{Y#~0%O$Xbp!EF zHYo`E-$IjYIbR9(U7EIyFWuC_@nLO7gq^&lwc2YfZ zV!zOlFT(MB_cph&XX~)LKOvr-H7i>kd}M43j!$rx9b|D4Nk?UTsNe2Y6CC5y|B%}IQ>|cnV-x6w zi?YY)JqmwAS9rtp^8WXU2*Itx+B35N<+Zww-ET-|;#dgf-d42x_v>SB@7hdzYT3${ z)d|4=_D*zw=jclKdVTs`Iwj%ga&2pxbk5&*%m8QI#%jJiISO#JPfXxGV*;`yW{k?zFKm; zx|C2(4C+}C95l65YeTh?a$u~*>2W?gRH*omOk01S=l+wpUB=*Pq-{pOu0QGEyJP~- zj)E;;S-Rb}9|0JMgwXKNj`043^(c^>)L=H;_~+2m)Btr%qOPJGwKqxgb+27k_K&z@gO_|O zjpIkrXH%3Od6iYL*gP{yeL~lcG!q_wEXdzBdS1!~OPjo^(F1n3Heh%2L#=au`}8t& z;(`9>QBvXAdm)ca+m;i~fw9Gcs|&Cb$4<2e4Iz{qYIIjSl zGjAsqZ@rsTb7pHkA~=5ot{$j<8$Q7tQkLmZ(UQ2@o4E0cR5%9(g=-pyb)%?ybLm0D zZtIy9wIxvs%CItFEdQQ0w_@K_YDzfHm-!^FGwG*%j_WkR2(`kIIz|iOyTbqvMFwh} z3BOfw25P>k=h>6bmwrJ@t6|A7lnXo@^fU?7SGimr5s$g9jEFka9is;hF!G5V7+~?* z!CRmfSRa3j$@l2K=nNs`ZZUW~jjmiJeVl(;k6o(8PLZm~2f6WJQ0}RAJm_*!U6JGO zN8}f?Lqi2u3Ww%{as1>T{hU`qkxDBvuQQ{sbS__lglZ3^y7S(q>}ywk9UpfuGJ8C=4FFtq+@Er+K(em z2_=CGyQe>Y{1zC+E}9v|GMpFithT7TALcFRf1t?Q@%JjDikH=jDPvV;V@mDrZb2hUKrs^k5Yl_VD0j;0T*6eZg@|82P*a>cbq z!{NO~F;=+7OO8DG(Af?l$$5M`-XU|do>~nx;vSC0i=ph+8!1hBsT-n-@_V`pF)oE% z@pQZx=Jj|nh7eUtTI-AFztQBTVX!&4Z{nDD*2%YuXGD_6#ymB8iCM}{ycnwWz*iyQ zm|%=IwRYEQY!(YTsxnIan8|8ApT_b?uQ`1VZ!J$^dF$ZN4lnXym#w{~-}H3prRpyI zQ^EFH<_PRopzNre*4a@+VRhukG?c)CQt-|Xzku1dkz{1NXyGm&ncTxxU0M9DPM}fv zJ20Kj(u5R+JrqA&Ew|IFqQ?B2gcQf0mey>4Pn`TRKek!TUj^^lAP|H6=jb!Fx(C%e zqt%JOAfqXWTf86yY^cm#eob@(Y`>%P-v-o>OkwM2cZ#`I-~cOX0)RvPlx5Sr6^0st zhxcOIQk>a_Giga|Ha8;RobB=Gp`8m5?adL-+GoLmD>Th^QE#L*BW=ztXw!~_^!mmF zj(IKK$KgCo_Qx!K<*kS2Flj8bhu97LOOFDw1x$p)E%JW2n40@;N?HD9Vm`2sws4@9@RSOF3+!EJrFf1 zTXfVW(%)M9ZIyP~y5{}4dWXy8<<&bUmHg4d$6dq%Ayr|3I$LQvKj1?}RO*{db*{wO zf`Hx_YdN>gcK6iLT!3iQvd@?Mv%_S(-e&|+SgsB#w2fe^(lR`S5BtVFiw-$zB833S z!|f1W1OVbCo+}{a)hanBIO5bHp*A{T=9KTipt4s4q1s(bw;ABBx*AQH1`RN_r#ipI zbNo_q)>w#PRc@qBI6vObwy?c_vxr>|4v_(s!N=pV&qd#@02GE!TgP)LSg8DuHA@4~ 
zZ6T`2bUMuW#S;|B{)Nb02*g^!J)}|Uoy_y;tf(Rrf_>aik?|KfgJF;Fu+cZnXvE6?|hTj`Qkj58CAC_*vGH4Su|e{Cgz$AVM7@^P z&!gyuCSET&IJ8gWJl`iT{x)ThxK^)HE$ZMS1Ii8fi!!9samF`Xg~1OstA&(%^KXON zF1q&Mx?VUg7b4Fr46ueYP9am2c9|UiqdP3(^hK#SbS-n2r3n)2tkWc z?^~(3f+oVhD8}z}L1mc8_nU=*pH@e-7M5%PQS{moTbjPI(`NWR_SF4R@=O5X-X}>P zE9zNZtb2^}VfJzOEG>`NWV^xK>vxk(!@uNi3+~RgEzp@FeOCi93@ehVXP0V5S^GAv&ed zIn56X2BtCz=8VtqoymkAnov_R(6rXc0}?Gst3RVod|0h*P(5xfMD>}% zG&Ic{L#{Es60~0IhtsgJqBVNn?kqOQz}8K8PQ99j)iuX;EIIKf&+fNd&ZE@4Sv{A= zCoP`}1V>M2(j;g~ z^e$SBZ=wAPs==UJBgrNP!DxjHDL?r`nGXEwNpWVVB%mQMA^{Dt`d(!eCsCGacOhGd zpFvv{ZH4eRoy#DKY>d}LFAJiflz`-~Sf$Z#@Bv^w%R)vB0DMa9aPN`8pZVWwqD zmPxO}9m9K{EQ$}3hGE{<6@amDxtwBkglKb0eS(H?^W}#mOXi+g8q`PvpGl)pR9CUe zZqzq@TTf1`li%5Ep$1CR%%*KR9H8=(&2;-7L^vgEH+c8Vny?jMUf;}V{%BgQ6+KyG zqnM9#9}wis1nYhKD@`?$wxo3ha~2>hQmGj8Su^f;zVSbGgBLX!tUJ&uPItUO+MF-y zsDbC#jvYSe=FWz8Sp5L}aoFtp?6k{|i|>g-EFKpUv0X6HgImkEgo3%It-Iec6CEp5 zJN%BQ6*xGHUCi&O>2sq?&;*a>JA@%pkf(F;IK(tWGoXx)v3?gAmvJ3-E*NV4{ev03 z0(ftX1JFciPZ-p*uea)_mIFM(sA#c^vX$oP5NNS0KeP@BtI~e`q3)zKQ1XNEK7sJP zB|ql6+o!Pyt^cVy&Ty>=>0yi|M@iO{OsMGx;{#o8mUu{RkC+lV?U-asm|o!bW4V6TZlv>6hRSZO9UsEdy9Lyiia8gi z(%nmOHu+si|L8mqgnL?@lf3FDB0ucP_yE0OVOuToiv5(c+<;}AHmziOxa*7U%(J|W zT6OgNVq?S>bDVYiPA;i|1$sPSS8eMWV~j6+G;M@$%P3*q9~$6HXVtaO8bP>od9$SY zUp|Qz0m3KA^vOhX-+r#csVL^^eTn8~EYzl|`agF{><SjjF;rQk>m}yNYm6mTYjR z9}G#0JRd;-WFQlX0z8hOuGk$~mAv_v^)0nVBdwc$ckVyv(q~iXp%0#gvDYi`BWzC| znn|{K*HdTY=T^e1HMV>>NTx1hSLghr9WF@&rJc>msOnGaC%HP}yr$_BMQbJ9Q+t`}k01}}h_2ii${tu3qMdhj2 z`QdQXv(K=tYXG$3pUoN8504V`HjMbiz;s&4t#6N%K9pZ&pnUH93R#G=Zv)9`Xg zPS97rj%dCji%%TZops??2q^Hog0j{hk!nNXj`9dSyR_Wl)z z4Fc&@`V6Xmh-v`Mp%m(CY5hK;Rw2ob{p2-Ft=ZBYiq8;W`)i2r@Y@7j0OX+rKpx7k z*$#aD_UwexEy=D{HK{55I+7D938mZQ9Hdi@&C*@SYaaUP?iU&Dh9dbJ??PF*}#IuWh}>yA5M=$cHD)!JbuuXJIQ@NaDAa~`-dBxWI&6%eD+!UILGZ?x=85n*Zg=-?fd<%muk=IqoOYd8_jKlhk5e#{rW#L|EP zM39Iu;%{y0cL!-hz2|4)9Gp}9h~_;(e$_-2RKjc|3JeoLpCHVyu<-lXPNIE}!3|Qt z$JV~QGXKmNcwlVnIvibC?Gu{*z|@F@UaLXfYE1n`m1r^j=ixD+N?^6Y#fLMTJ07F? 
z`IR4I>EIeQYFlvgsMutZL<(lry6JRRL9Us3v*LNiVZjLYRIbU76-B#^v2#p)ksRD` zk?LcsUWjB;SGDT!^pIVA;-@1NtTX+))KGS_5s!&d+w*uxw$T8tfvwvy!o#9P9PJ|j zoedOqTnty;Z`prnZ)arexvw056P1p-5bVP>WeOK(dYtbLrB0=g^+&H9{kTq}^Bq)| zq}EH8Z*a9yUHQY0aT)sJSG65)vZ!qCUk|m#9PVq_fG4;VVJ9G8ju$btG#f-_H79`b@Pl6!sI>`R9k$u`M1Q zk~>xL{lZ8_>xI8%a$WqFT+P}0bo5Jh)3<=@$94-DI!y4k2DM9nLM^oOdBZ}SR7QQ2 zo(NLO5Ud!rxv@|{jBwW1v5U*vuE|}R+b&~%sC=?3tiWuljy!jT`fMryJF}^VQaIzKTY9<(TyZi<+|`) zH?_%V9^eOTEMHdXj!lB|IAKsy{9o`mT9R!&Q;ps8_!IhK<9Z!>s+@hXqtVApp~VCT z(MJ!``hIa?sw1R3-^$F)*!wsT-5kQtyEdeAW!k0^AAd^x2*O>HHfe6FDSyufHp&EB zmHFw`7aAzq<#9|ajq9Ls2nP-<#M%9D!zQ(?jJP=IS<{-Pzni~Kg*4ZhV28GRQ|3qf zd_O1jY+($AqlQ`A`K>e+g5>Qg9o+B7c_KTycU)%ZwinRqwvHXqJbi+-Wf^ScRni`8 z+_m8QxHxt^?1cl;5A!capepE~H)l=4Bu-d><>eJ0Nl`0$DJ=e%7~}%PAbRj@NBuNw zy4Tnh=HFJ-PJT_9(Y#!qif_U2a|Y^25ZD^hI5q9F5{)b8Waepf05b-+Xq#0$fdSbX zFeFPZqxGd7M637S`W;z!6*XinXqquhI|T3o!mV1dPR$`w-;PhMkgyuW9=YW647Uk9 zq&QTABkXy7tmm!7k*LC7Ot&T;v=w0&RV&W#!5_Y?gh)Tpyqo>%&)lo8SJkR5EZX1) z2L!4uzL}vmjRD>Xh)?@|ycMA>M5|%Y7Cfe>8nw~&bX6shDDPq2GD^Ow3T$D`oiYiO zY!z(N`&O*&{?^n{!)2^Kjk~aquy)+ogL?P4X|fJzM<1Ikti$^H&16C*8zi3d*5_E} zzil#J)!vfq#6-T*X2I%A{%oA;vmUkZ|B?08VO@4h*L1gZhX~Tr-6bI)C5?1$oC5Kc*|U6 zzC(x1Bh6WVCPG^_gF7Cy#Auv!T4n;DG#n(r6{#G*FUtRCr8}@C$3G^1f@Jq-sONmF4QsKbKE{vDdWBO2nf!j=Xz`tH9icw-9 zg0kbhmlstr_WI@yTq(b$mt<0TTSA4TX`?U&3XhV;Y2qmct2%_bEXKM+Q8PYqUd4K7 zGfo0VSTjJ87)lSVIZ&GYFh1I%?-Dd*?B%mY>z@C-c@^1p2Gvty^>#@=(CY0J_yMC= zvfsbQs)?>Xo!9h^NbPfu2EufnY31pQ7Z7qaRY|=3G)}XoH@ngoib*Wu>WUGIjLM1; z3*bkVHCNB{%C3%U0{s3)_dxin1{4K$tkZGZy;|jcM4GQ5K39nu_SYj!n;INw7O7KY z%K@*+Z@Rt5TEbL2$(-n*1p@lR5*+y}seue>Ce02E_&`QLR5hw&55MH+Kx}63vmw~( z!RR{&?XwoGIbz@OO_~T`>3G^5a<$Q!=ugH>3p@%8|qvy?J7jMURqTI zd=s0s%JKgmm=pD0hpSDd`NgSc%1?~cv_dUr(BIGukE^Coiz`yfXml7wNg^8wa6d48 z0%dqoQ>K+43hzD54yy6a;3})RGD9;?gUW(+>RQ=xpErR&nD++pUp@TlWv4JdpxHGt z^)Z_oydE@lfGj)CFSej`6*HhD(r5ufm(CEUn(_ zEO{z26LUq;LtXLK+Z(?4^>|ZSSD}hx2KS)$^px;#s|oP$ieIZRez2NgPf0VaI2-=% zFEl?9t|xb&POfDt-Pbae@q0E^_6uiOqsG^5ise3_Lvz*ly&+H20FY9e#!@j#HF=f= 
zu}fUt?lE7IZQt9`yO@)B7Gb!h8*A*vOpMf$Nn*>+6|o@0kP#~+jTSei{P&s`b*TrC za(|SZc_WpB>vUn%Jl0VDuOLOfw3I)tWgPxArfcs|;L-DCLj%>}3MvF}$;)TMj)Ozv zH5~3&&jx-dwZjmd4^9qJD#^0|mVEf5AXn73w9Lo%@6;?l`d+zw5M$$LK2C@F942yj*Ozm)^DLCQU_zTUJHDdB+V}wM*O+oWdRhbut8^4MR zLq}Vix0TIQvG?}>zMgyEwmk!)m!J+f^c@TJNT2mq%dzNusT=>~YU_}NU4T;Nzhhwm z=q%fW(Ho4~yU4R>`AZgg{<@RFpxD&f!yYmDU zI`>yl(vM#ThkpGQr;~R8wdLy1YyCQ7?+fp|evrpJ9v>jP;{bRoI=BH|-4Tmw?eN0M z6vc9VNwC{B^lQbnM2j%R0+1yvgc$MReC+OY+V%%^3#lV#N2x$%)t^jR1HYbwConUl z#vr#tkIx$_hkyfG7_Z`5t#u9H|4L(tqvkUxYi}oAkx$-BZcZjM-S3YL7d){_1t5d* zDb~wyi`Khr^`$`bsdxFH6muMP;{TW~e0X)m1_EL-n|Ty{7D$Z{@xPe1i9!A;^|5v9 z`ur~huAV-jL!f0tgXFpyM8tLs{@PA>d>${&X-lg-R9n(aa9w?YzVA-N2^{Sc9)?e) zF`=+1zU7p=r*p#iCv?-wH^Lv;{$$pnd3aee4AN(A?lV6QPSWJi_v8fjyHv24FzCk@ zfq-bjV5K1XVvq7OhuoZ5Ul-HK%gsHZyR<9xC8`Uu{wW|==vM=B1qH6iPUGfN(u{4h zPBWl8GvN=um)IP=!{Fx0sn~D2dH?+x;C(+X`S9RPy)2*7deDLHQip}IGfP{*eu9~I zBS;WsB~$?I8-QV1sTmcB`y%=aY~Hx^zSe8rU#ll$Oz0rai2sZAjo9uAHDYA{TjKL} zO(!2wBW4=DD@NaC$=qCGo*Zn-nDl0a`axp#iXzdwr zZoc*O)FZT^Lb;+~QcY@Oz7%FSaw8gA`)LJqbeR!p@u#U#CIW26G4I8a_=CazD6=@c z|Kxf%Qm(&%(y;B^XnUi%B)!Gfhz^_N^=H>-`dhGsO!a_6NuyJG+hdYA!hi_W_>U=g z&Lwe(zt#~;$5rnpS9kBwVo>?Ud#LSNkzt99llXGqonMt+*EML5m%*yWab=tgtK~x?4(7=uBlV>LjS!lWU?~id_wP-Gll9 zuRbF5^;qHnnZ&xQ#GB^cJcxA2hty-k>L@?qVL9*DDL#g@3dM<~7lL}c=3Sw|hSjtO z3hde=VP_WbK1k><+-@@b@%gyVT z$jNsn26yQjA#ZUZi7?F!A-o?V?^&V8x#q1?or-c9(2Jg>9RL6%gT?e-<+ zP)x8IW+Ba|7TBGwLtv4S2nss>h2!f@THm&cJ5 zA9X=+&o6V>9gS{5B9uGkByYM2hFzl7Z3LZX^RVolPZ*ataT<@x6louW-z|c|n|EXqO7Y#O2yy~4yYA8!-NrJHvMFcG>>hX^6$f8>Is*{(X zk=*f6ovQH^vSXQQC?(W{!b> z*Yk=@&-z+YznAWu-6rv}9-uEvi%rH2tV+(b;N0h~^kX}F*pF-YEt2wd2$#X{*Tm579d7M^w+GN9il_wy$?( zkbaT%PyQE&Q3M-f;BsFWn4E-~{O&c*v(jcK&zEqHms$jw??g$r?V@6BG^t_`-Ka3? 
zBB^+OAFySzTLaCH=2r$4t1FeeOfNS@2vTA(>z=)HDGJ`ED+K|RKWGy#&| zSQ8N^bN0SVuLLsrnPMHu9i81EP~d-z>60E)=bU5Nwoi$J5&+*2d|eML{{i1dGf6zh zM_30YDK~ax&_wmSLIBS5>%_Rw{w&p>Wwhw=xC--zPWSvIrmQiVpl!#11o7IHkn1k( z&-ZdtL=ombF%nz1jea@8vOkveWpo*d=imBAL#3$`OrAGLRjYA-ygW}1XBCy74e7#Q zzI7{U(%?eMhd9l9JQ$4n6NT#ie6aU2agi}$+sbpXUh3=&zG2Q1SchMZSQHRz$l+Md zVqA8#TRcit4IM$)vx|Rycs{r@j_Wa2Jq4#2h;P#oWC-L~NI%TRBb?*61Pf*+%iuUp zPQFyjs`<|j0c|2mrp#}hVZTWSs&OtH=xR{u%H$d@K2#fa zX2v$#&Y{FVV5IizMf3z+Zj(K}4_0bfZ0Eo3aiE$wS6UC+$?OW*AJa$pRO>U~Uv;t? zMS@sC#DxLSems;v-@-l{`Avreq1!I>Al5!g;!839i3hl{Fb5Exo7P}3-58R1WH@Jz z+#c^YbJ~>-C+Ckl=V>n9lHA~)oHp!s`0YL>lM{7fYpXGLzJX0nueg;I1#v)Q$qFY3 zukJ6f%r@KnN>$^dbzUSfFGm}Mktj%Ls_Bm(1E`MH5Y5CY)9NB~*Bi}BuYasgx&~Dc zB-Q`SW}2VLMAxBe^g%?jbHmNN)BFCMHl28|GMF(PDjk`CTao!?|JV*CJhpJN*aHa8 z-p*N#tK?z2xR7m_=0Q_`e*M%4(@z{7Ti#nZPECS*!MZ+9xkFN@5Ss5YZrKENjBd@} znE)18qsNKFtT#JE%c791lwO3}me89+;14|4MJ50C zVW9RIy%W&H+4|+`WJbP}=!j+8l%xa7=Trd6;vb|VcOWnMIMXlHxqN6rwXPXyp)9S% z>bh&f#@GAvi49%GZZdEvrSj~o?ydnQSPuouC-G&FtOpfR52_ewm9Dgj*^CmTUU>hioKpy5NFqzOwUyxl>=6bC7aqb>JO!kH7B=i45V} zCe-JkgCbli3lx!?Sav^uOe44>vo@->g2Tg`=t=eCE!=YlOV|01KcgvxF1f&xRyXu3 zc8ZBRP>r^%y*b)&Li^bHEGvAS&j7O>DlQ@!_G!*i=Qv{5a%j#4z0Q}-en zZuv{s&pRW*8F~9BNFc=g7-)~jh||O8Q$15KU!&0wyYKFE_=c7WQ!1f8<^6|2f_0JT zvZNzQ$*q(X`6OF3+>Gj)ov>Oxm*-ekWakiE{J4DcGqr_lt1bmYj!3LOcGT&ue zoXk7>QuD!GiRXJK5mCk?+ru#ZcNOAD-aDY&d-=A;*dVFt7DDo7Rr-Ay|@CmBL|zAeTTxtd3et!oh_=SX;s@x z^e1-YZ-5>J0h(=Nr{xzV>$*5(JM;GT=X??CNhsMT!0q9-d)I?Zx#DKFhP$h9<)^du zi%je0jTKvQ5?i~C>#fjVS7V^=(mS1Ql{T{ zL96A2dg|!&n^ilRR~R+1lv!$#9|?Idtqg(cN3qA88?Vl)ds0l3dwyu7=;UII%jV;) zk@tH@A%sr;1i8#NSO+w63f7Stk!@6zKK?x%$WFQTsryK$IZ>y5kGYTUimpx!|0kjpLK#Cq;DGAb~}hN35N4B7h_kGC@_C-ge@`l zapNAw%U?J@!K2u5o!U`JUg=%kTBxh)h*$DM zrePHi8mz-4c3isq?%F@p*hUA?-nfFwd%|vqIMWlta*;-($r5B@yf!M>YymQCwlIuF=< z2@7KZA6$Pmty4WlRotwmZwC`BjDb%Wmww09Z0snD_I{A_@sfQ1cp)wy_BysIHPrU{K}5P+ZPAbxf@Yx=EgR|ggHM;{reA*HLW`&NP& zQA>PCaU5%1^hg~kScbmzu5wOE-`=I_2S@#NL}=01kY0jG#UK9;>h*F#g345}M8ZL> 
z)J8A#S~(*V&HH|bw=8}emB7Kad(fb$0={==RXwX?mJ!dna5JvZ#DA&xS`Jb zX|e7XX%NuMoL09V++lb$h>++;{qX=fA8qtqwfuz#nlW=&k!D4+S^2c{743b4=2OpI z4T_okjJY|n=Pfu zrK)foFsAp2O=PV1EXRIM?`wH~PwCu|k+;&eL}%)MtCLdnBvm;uQ!|kc0BR|f?$~q+ zPUuu*)_ygg7pf{HT>Wr*1FfX7qsj7hi^Wj7Zb6wn@jpWS0*16 zZc%HGH%DbZ3{R-_U+y+33fcAOLWhwoSbx*)Lfye_VDK90sBeAG5=CswAh45?5GC*G z)PjMIIT(N-Rfc9U#{OnFnlvIh#G;+Hgp(rsO_3*hUW&`0hN?3%tJNFFKU`b|g>86T zhzwi=xqGCyyIfb~`)NDAB`|6BamZ1FGoVCg&{5%t9Mq`!VbifK0s7$W=u~HWCcVcI ziRmc%{Q9;_Xx!(9lpVAnN*&_)4f0J0_PX*w1%eLGY5`&gGJzlSV;S#W$_8m{OTWPx z?*$cV6sw_};i>~}UZqM%kfDPHIO;1LM=pxaF&j!V zJF)f7ydRsb=r<}I2My-*4hiSaz-^`sa|W@d+j% zgMk@)E5kiPzIaCNN?f^=Kst%TK0SB<5#cUWG*`Ja`5-U7vP?!<$%zFswA*OH*y1!0 z_C`K=YL4K|(X(KA?Vwau#kD#qnv%+C{EQ%!f)-;NSi=kTB;kzu!juoxyVCV&Df^~R zJn1qM$dBHswb*4S42hE0eIDte(Wy?9(ohNC+u^RS#S_2x{M|pP5lrz7iFpI*)@BX$ zNJnnX{?z`>THR&Z`;z;G4JXj4MF&MLnwIOmTyuh;GHcbad-uF$*{` zYkbI+@n!6!d zjC!Ewvn>3*&VC0=M|SYD(bK40&d%x@dDo{ zDvuXkE9}DJ@{4Km$NDI-?mpv6=KJu29QBc6*Yhga**MD#cZTestH#&F^ilPcfPcy;{~7S~BEMAweV zM2z%c9CMYcCcHZ-a==&`LIr2CrJEH*q=_0A=9 zSaDC;vJ>7NMaR>jz^&@p&wjd3>q|EHOJGulz|LmOtF-0EQik<%G@tcypBTpaJ4Vf@ z{<1vPP4T_j+n_g!SuSR684X7DPL zIR#@5H9rnk}CMToLuDqq4h{|qtR zaOC0|8myt|kZJy!>`DBHp?D zx*Htsz9%tJ5B_7_3I7EpeR{0OO{yTndw#TC6_a15`VAe2X654w7?;x<9ian!wH0=qUo=k0~* zP3y~vZr;_Z5dA?cq&ADr5PcvjACFvcH8Ml3*jb4gxCd7+k{j7#o)PtB`Yw#iTQYjR z%*;(r^3P8WgO=lmPI&XjM6BS&(+Z9sFr$tMa|P8l|Kl}n^vB;R2}78jp8~+MSI)yG z*bnArSPBO=2E_=yf7T=Qkfec3N|?jOjf$}Dpq7-Vze};ihd3q425CnQbu_^qK>sNd z8gQGSGTTc^+JY|RFBR>dgabspH!TnhXuN(>6u28!!K~=JV(8eXe>P<=9Do zZ3qFM3rNbEcX-XaIFTEq>)?{svaP>ir?`2H(BiyCMq4ew;+hr
  • na3q^WSy`)N{ zySuvrYoHYtWvnx}#U%VNtkDk`V?lq&KucbhLO-e~*=Vy6>mgdZW-LeINBw>Rut4#gJQ%j^3k*mD$paeVmJ#%-9uSym~{vR%L4wlQ4Wmy7)nyI z+b03DB_%LkyZ64Uw7rsA135l;-OV_CaKkUU@>g_&zFo$dP}y{J*OeORKH_bKONAmv zF26VY!l$CBtUdImn#!1%cdWv9bZCTY;!eaRS>-Wj()&2Fj*oUrEt|T)$^LnN(Ht?d zlWYhn#q*|HLv2!wruul$t;NV#A^MTcB)xoqR#;p}C_0 z1Fog8^q~4+6RF*1Yk*77NA%N8KtqolPNc6&a$25|#)^u1kJ!MfPym0 z5GI>lDhi9%+PxHyq9(?;ccPgn0N*OnBMp5b+U(Vr=&qp<>jU*&Kzpl`)?;SFBNUO= zS9d02nFaig8No`nTaShU{_5HBs8Fb;jq1UQW9|*4M_@SpG-g7hwR;F%KNyNu5S=Z| z$_4{UP+i%hDd=EkO9YPn^pD@Mfz8!3_%g$@nCI>&9Ty@zD6H=Lrx&M$Kb@O=vTeVc!7G^kCc!8|u6>hQ7FF?cP4IRa&p^-i8YP+GXO8p5!3nyK|BjPH z6CR4L>(|%{g=hPD%GnB-Y^wcAd1YEue8)-)?Od< z4@Hya+nd<+DmatMcekZkValyi+O2pIg;3kF3Q*+0X0T6#hZkFUaKfj_)nKFpf+Go{vG zLpr7_qQS-cNMHUp2S@LexIi8-m&=L7A*j|&Gdvl2KTN0cs6;; zD5%SO@M9xDe(C&Ubnt?BIyA35DDgZTsgYOHP&{lu4x7F5hCB5+Us|{95pIHlL>y&< zuSBAsXYUp#J>dIhZAG^7OZtx6sKIglttxbj}B2vcvY{T5Ze-OY>vw@;Ld=g3jO?UN~CbBqaeTQn$B zRTBDnd3-u>54edUsXl082;CG8-c)2N4mRQUC%AkDdbtwH5ZIrL{EG!Jk(z~1OC>B< z(t3)AM*{ByzyFg_z%FNp_tHp{-s=_5hO`tB+!7rB@~lX&bYs(l`YrkeU80mdFScqmn4mW@cY<#0m~$?(COHzb zCHdAQO0_W?HbnXv8|1|Sk9r|t$#Mvur@aNV-cNZa5Zesd;5z1|O7Yt=Yw z48#S(5&#>5*&f;je~uzA#xWunB2s?2am>EphF#ZAdfa!d&=p%A)`}!#&Ffb#_@<;{ ztxK@@!$~Pepd=`#9)()+WgjYfK;bt(cD%wMjj=w|L;((;*FC*aXD~(AIP&$4n6V-C zH<}yr%hQD9wdyGx>p_YLUOMbF*d8^r&NF%KVu|spWvk6PD?o{4((NWE6WXeq` zOC~?{q)o}An}P7WenOe&s_Zko$e_u0lzO&Nsgu1tc=ZLGURgwAx3CLrX5l<7ovucB z<->#@mtu$6>N2Ijnzbl3s#4)MOz_hK=dm2XRcI`pw!jqB3q@;y{AfK8)swp?%2Q)LNO{pJ`pt|BMc4x=9DDRRY?bw>1TXX!vw~oiG zFT$D=LOYnNGipFRWZ-W3R_<0+5uvU{T6M63^!qDMWeuxBgP3+i2t;Pt;n59p z3og-SS=hx_XxgX|lK21EhDD(Zb#z_fH%gU21`GK6Us+C}V7KQC*Ck~`pY_1M$ymkt z!ikB6M#hXJpji>hE^J(!4GfbvK4OF=gMK~)(@|9a5M#h?T~ZO4g;+TGun{cT8!)%( zS>iUYinpH=HLKBKTuv~Vwbh|}VDQWK{cTMRIcauqvY%ublSZV zh>Ttx(qr)J!C}<6R&1zf|EM47>bpCxLon~MC8P;vnweD1|ME;OcRPydj@7PyQ|`E8zi2OQ$3=HuJ=S20ju2?feWk-mounuJy{s70wC+aep| z>F9J3?k^+-XWP7KkH_h^McyQjc*BU;SU>u_uv*rEBk-OIiRQ)VN`+Y|@;ZqU21+q4Tw}C8Mxf 
z(x${n)Rx3slQ0^Em)JDQ--TnhRHNzD!qkryk&WrX5dQs05?b*ycVI#ZxGF@c}lY6@wnI0!IJJ=Dt zOPrXEP$;E73Q;HH2*C8%357z+P$HHY=8ysi&z#{QC?hD^Vif!nu*1T4ay`A69Vj(k z&FMN9Z*1Og5%NxUS5W8nebEvS4mYjmouMAu=1rP86i9Ml!qW-iO(O1Xp^M3X;dkzu z?4hB02yvxh@%7{NN|@QuGUbCzLo%0SBchz(81hM|uz1d)uz23W=Vvg^+4cxfjiE$M zu(s1g1Zh+@s(ex!syG+n(FS?OPidRqhWF_5Z*5tA5qr7wc=gQO`;mae|(Wn z(A}{eK{fsxCqr*nZ!$?J2t&1e3)em!3NiME^V$}MI-*XqT!`) z^tQSi)j9ArQ}o#EyuQ4$etumnydv68j@3&-wX0@#Nex%@G2jb=`dAGGjF7srmxkD| zKS-SSunhG={w8e_0fS_)$Js_T`Jrj~f$B^zAu;eIl4fBzd?mbKK5?cFn^dOkv%i#78PSrQb~)Of7;K$%;r z-VS{>a0~NJsPXN^c`{X@CsQT2nzidYGGR(uy}@Q|l$ks@b~Ik39{6>}qi895cxzRIbD@1&MK7?U66iy>7&2PFDDI5`KPIc z`4MGq?|>}jrD=%ZysUv0)X>+4>MaK+Pu4l-{h#?&E}$i366lZe1~%-SLsEJ?q!49r zf+0-Ha-RuF!~FeGzuHrzY%3{QdbP&HP`}fq6^CK|{uxPTAK&7)-7{YKbpOoEJ2c^D zFFvk5p;{21uav9iL%g52yxZ$`=ls!8-`gpaw(^raRH!KN%>1?y40%0X-dEVj+-*;yKm0((-UT|856 ze|j+vIp^{BWh%ON@I>9G4n>=ON1ox=iboP1EQEnxu!Iuk!-N3maaNF}c$+3D0X&CZG#O_V9EfDL(F_xJ-ay``3C>N6 z+-{JeL&*~#VK>S=8}Zd)1T&;C@fIZISDb~B0nC^YE_Q$Y zFc`)Zp4jr#hol@No8)ShcyNcVF-q_sgZ{c@Zs=fHr=v*XLXD$HUibYw#5RFLjHToE zzED>>8bMXw3oOHkda0}~>v39&ILn5Q-yXoG>s z%fgH@!4$|a`tpuPj4(MAPYuHqq~WbTZdjkzs$ym++V>2&jFBUBPlysGZL`^6PNTKr z&+}rbNQA3@z^VM9Dym*sS-iA>S;|nCiKdaqsPUWccppb0hh3nOrAp2bdNgj^UxH^k z4QSbmrw)Tk7F>B951(~Enh@xwuMGY|o2yG-NI@}*Z8{c4+NdV|esm+nBV-d|P8D}L zJbHK26X=!h zpphRbAGOh3Lg`H=$M*#7(Ddw21*;lo_`kc;;~_lQBs?BrPWuttDhR0S(i9fn?u!}Y z-xk4FJE+5Nry&M)Yw%*jIFcGvO2HSOW(B2CBzK4wOe^q~x+_10fT3W@fqM?*@}kEj zU8q^Kn%>|9*fw{YtwPFMNG?z@CG z=OqrVhf?>o>in|CQlQ`XZqg)KkfL=%XLX!c@ln50CjvL>(%p9ra=UKOU5UN zz%~RQF`5+Aq^N&uswEOIFQYyb7IPmZmZZCBlZ|Dhzrud4?nHjJ;Nb2fy@=5G#Z&0= z0I6j_f!i4l-Kv99gn3KW>?QDut039i>91eL7^6cV$s}!MjYnDUi7pR+!3q6$5KbT0 z3eYG^5oK*A&n;y7VrpdaLs8~ZNuiDAX)qI$x{XHHTX3n~f`7C75)UOo(Kaip#SuQg zj)`7gM{IxlBfC+P6YjJ!cEXN8mKwa2C7vXuvPOWY!T!vxS{BF<{5Ol2(ug!YDSOo8 zFf{obJO~JKPWfvJ#>SIRcTTtXkXm!meE0UXYs~9UF+gtM5lY-U?M}+kNfb15?IEK| zveGH~QuWXNVbB=s4y=ca){ojsp^h}z-qMb(a8Zw21U`>q@gCLf8Mad&$`HDV&WnBx 
zUvA%5`;}Uxg30f|e_@{3Srg^FR>VahsBUA{t%@#CrIu{X)!(8B>#lkrSYckz`Pp;d zpF1SGA-TS*hAGtC+pqJY5w9df)N>*In817^Jm`EF_M0Q41yZ1fVddW{)D%kg+!NUp zM<=pli9%TLV`8~N*mdi+2iSZsjNPaOs9!TuGi)dGcK#bO`009??)#Mo_|`PCOCG%E z)6>n}mHnUK9}iZy*oH>gbU=ydX~^cG13j;@$^5%z@dWChm>Lvf#mi?!FAk5-{nwtC z1=RFCLL)}JT4DFIzMZL*Dr~5xP&kFUb3zr|9B;E!FZgdGe9nEA`v*BBqsT4Dl zHxAW>t|x0Ars8qffr{>~_Hz;LwREK&ooiB+%3YJna30(`&=3Z-Q#H`<{x2j|e%eob zj!#Qw{WO=4z7^GGFWcJtx_bET(=vQ3;#0$KqxUkp-+a!MI>aa72&96)`ha(0*VrA!w*7e31p>i z>@}@+XpiEo)6$?a<@LgNQ6~D-R*5HcKnpLxUH{?;$aXib9y#XzNMI;Ns9(M9aoKyZWy)}?NpaS5YayYLt5BmJ$PC1d+r zP~eVsT-|$ba*%Qk&E)tO_4`n61WgQJ^_%>_Z0Q|P6Z8GX5EgEw1r1a7tr`7qiTg!i zJLGfG-08ZBtR`JcRes{*Zba)aES=8I>gc4afkeV1vRytNfnB~)31-9F79`^VRFMm2 zzS&N!sVR*H{?D^Ie*)@g?bJVFH>zvJVAk!@QGCjYx~|1@3>L*+9KG&glE%fYQm@2w z%;3y2%&N{#dpe!o`OpuTQ^*Z{=>N?I((X#jp zas=_+i`UDXFTViYLg)zDFFKmFuc(emHwj9GO)8e2fH&^J$IIOc;Y)Xx+vJF-Ta7}e z30b(GGi%D4ArZ}ks6QF%%|7r?h~48h2`7=9^|8=nP&OOxLK;3LU5Ex(r6iA{gBj4r2M zs9d~6c=zLR!4K_uckx5KU7ysKsc<|F#$ML!*nB#)lPMcKU)gMFP=*|s^jW07+b{mvoW7r{PAsOBP2iy~v!^_c zQ86SV88pCFm2)D7>me^Y)Q(eyr!~j3fv2a776AYUHs13P-FyERb^ZzkGyXPcr%`NH zE zMTuX>L%|>-&bFm7TX;vh=y6f=4p^7h@5PhzIQa1vbHzekq}(ZERt+{9cI8B8+uz~w zc)7$!D`iba?2gvG2DK#1e9ue)@A+qUnR-+=#flml8<9P+u^sbD#*Ws`A4Ac4!Rn?B z5|o{jF1cu%q4W+g&JWNof{EUYucC_Q9$sC>m0wd!*l_+yowom! 
zHCMdD1GiJ<-Kw0ZF&L@7q#Xt6Pm)|J8zawL*X%=G4vG?b=|lFvG>ETa(F>KF5~RCJ zW@jaDt;hl3xk1CLNgh!M$D;2(upxT)3?$B9Ws>PIwO5-ha<0b8V?)90^%g?RIn8eb zm<%em>zI4Q#Yskk%O zo+l`W=IV&~Mu~|e>`@s0K6tH6a`o?{i7)5a1_Sv>{oa?swc)cHK<+q&_B=V4|1QSA zN(iH8M7yIIDZy!a0Dfi^+Bw1bGIto;4(2ZjvGRCfw2>q2iEN*})bJ|EEw8OFhiaR6_p}s?gJ>CCr=gHKj7zou zc-!_+wLR>54!2dGWunUkch$XA0f|wOA~!E>Fm6KV$tFNb?OpW{aKFjX_LXN(>iBrQ zH2;6zl=TleOseynUn00FS(4-@zF5xw* z^czsjf?pYV+BwI@`Bc2qrLFj*oD;4#u~>UUr<2jv?+Eu^GaCTie+xq^q{usVHQcc8 z{qJ?KDT5QrV{9{LVKu&=zw`^#ACjzmT<%4PY{Qp@m9d?q~INacjhrHT8^ZBYQCu zbP|c-kBg2@;(mbu_DM_$xc22IJ#S%g$Hx~sBn4oZ5ro*Txg;qWnc-5CNviwd68bsE zq1Z`cM8Bfou_t;@1}OB*a8gR}$MyWAq7HdCm7I=X!VKLhIY|!9KC0+q^0dtCpe7Uh z>ej5g_x9iPZEMd-> zzVDdK{p9gllvRFZ1G(p5Q)u?U3;=|wzbwsOA;eKc;yrIVuPEnPiw}LD1bPC}`Goqt zKOEHs!;i6*UE3mei1xg=iJ_J77(Bp6!aQwcK&Z<%8s>g+L*ez&RLjc^O%B!671zlA z!R!J63xaZd9$pAYru7@2M-HLwxG(1vT=G76+-Vt$k6zGUIl}z4XKXM+LlE|IxFNd$ z1y2Y>Be0G$a$^$wd)TR&?^H$$G%n6&$E@{6c~~c}1i7lvGKxT^?S*+5q+cC*g3`Ar zj6FIGIQf@n1Hfp^bv1&)gcMPPf2m0xKzHP8aY()M+M6hGm~CS1uq z2cl#;Hk(~I>Pqqq=3Xsia_@@TytEah5 zni(5XZ_Q-h2zPjc(n-ThA*>YM(>@75Xm|)+YYSh_B&``LRZGcVH3TJfu+38RtC2Tv ze;kHM3oq2YQ`q$PCd)m0A=Ay5v~iwb6!DY4#{qwb(WWU)SvV{o|96mQAj=xa#g`ffF^)zgJjme5({3yHCbzP5oAYH#Z8M`Sp$LZl&Q7 z-*1y0IeiKHPV)+aQ%`P_{n#hp@xeTV$y~rYNqL6dd-?jAkD}7)8M4_}y0Pq~wU6n| z>Vr=-YZ5~V3o%gyPW21XYc5sDS{g1xfALy0vI*X3Tzrgh^Je#Th+_BTmm$&PREi3y ze$$Cy;bOc$BCypUdQ;N1P$%~f1(1rm2~+a4;jwq4d2`=px)R-5fCyX5^f;SofE|r1 z^;(UC(ow>)KGiNAmm#NkjTun|PiJV{HXb8qDIiE`+}hU-b9}~3eXJ^M*dfdc-7k}DLeSM{)`CME|7zJQUdt3};q52ISq)380>_T^=lprt zi;##y29F^zwVoL#EGhml_A;T>cb%`=DkvA83He<@`PL4{i>d)Lq$~zzm@`m5S4-d+ zX5LA;aEYK2^E+P4_7zt0|2}yFlU8NKPBOykXYT84>SB}?CoJ`O(aM zX|Pp{rZ^D^Z5s=WUxc9KGI%%#s`Sk08cH@BnF#TwPo~7Ya~^-C1GB|W(NraCbDCpd z)n+IQMS*@=6ey`?7+URhD3Lcw)k&~>{WmFnCX?hdPQJ%RWJd*CK=zW#**naOJO9}R3bb zDk&!;s?j$8lQ^Zbp+U<{o}H7s6S`jamR=03aw5N-E*_2bUC%{pLDSdL z3N#ng)zEwY$+ETg{kWtVdU++>?&AuKzD+WRINcw3Y-&`>AiBYpU@?o=WCDubls2Nz z>+G%qlof?FP~uMx={0jtMu!A?pQC}lIcRz=60lVkY3B13#f3o>Uz2EF=vfT^A|=zF 
zYu*53FsXBPHX?OfST+M~kjim8Pye`>X@J(;AnTAI$>opJYm!a{y!GGiB!;_h(Im@8 zIxocGD;aDdUs_2vffj6$>YOd{A09>BK%&!A9Gf1wLOG}{KXCM;1aquHATL=bG|EOI z9J7-jGU4~EJx_UE72f{EY0+F7vX3mH@NeAjbs>(*^7Sx2Wr07Gq(W_|IY|Ha#pT^z ziO0F^JP6lBV$5G62-}Ig2qyREZXKtRH{OpkU!9333k7TV=|)sI+$?D7BL$YK1zei3h?y(ZkSM zNg)1z4-cLr?GhlvL>W&qq0$rSkQur|_5I%%V?|&fYrMPGG-p=52LhPu=C~&J<-wJO z7RI=lj1=c5Y!JNUrijsU{yW0tkwUZvPlvbYui(S#pAV&aM^uNYGz@qAXZ&Kpa5LE@ zxbjcYyBz=8fU`~4Uuk{+L0Fquo`QLZY5@X*@00{s6s%vqPFP^URn$+DJ`_fjZBs_5 zg+Vq?k(z@+IP=CCP}&W$NDT-q{P&W4)^q$Kk?}`2s#(z{zTePL z{(Z3I(}Quom`nfni~GoJev>yMcpQTsPf~5F=2G1uYq(*$lp6P9_xam0l!2RZy+nf@ zZ>L|JKd0kpze>^mbh}#O_v_e>RQuGIK^5Q*7|VVH@s3^=+Y#5p5bjsE2UbQ8j{6@* z)?dMa8Zr?6mWd>?`|-T+7Kn_0uDf7yoR&qB8i2a$w%@4c;DUwB?Tif4&1S!3Wc|ca z2gX_-VN~}mN8MxG|HNxnd1w&vKZZ2y%39+tU$7|WaPLW|O)@x-23owWT!nf%o5VN| ziRXn`Ola-0Umw22vbDqyOAEwe=3qN%GU|XK$N^bt;exBN=jytr|8H0g=!Gf%F_QG( zsk<>Ee~6%%ZSUvl|B1uZ6d(?#Igpo)hF`zpr_|Krd^u+iRTyEOC99d`LvKpE+-gCT zC8T4b6i~3MwV;`zYu|YhsCBAVV<*GCdtqzQoRL+64pBCj<9R4$Ueo%E$DMIGVbBV~ zO_1F8s(Tr~EnK68=O6^Y_16L(prz^&Bg@!4J-kYUs^G$6l`nz%e^kA7R8`&gJuC(B=f zyy67;;D;Yq!u^s#DgrUy)f@s!7RYe2SH&#Ghv)aQ?GZ?{u@@YcLzu=$q?rSz{{?Ef^#H6D|N za2}$3;bn6(x@P+Zb(14kV6k7cuy;Pu#LyYrCVhhc&oefR8@xsh0Nbeh^1-_BO{~X& zcH+b_0A2IHSv+yih7KXX{2lTRTXh^UF46DNZ~Q(gdjCFYOGZ}iE|N0qY#U5-7Qh|A zRwhiRNfbxD%y9;@W?6z)BCQkV*5dXD>23i87re0Jok%a@Sz3WSfX=OOy~y8j{==k% zXc$jTFAbRrmsVlFFh7Si?z*Hk-k>~w3QV>#MH_3I@*!*hRn~_q>3Q@=H%yX07ZdOp>#29oz}!(C1E= zzSUkeU}x}J(Hyw5@8#it^_8fSb~IqGg=oS7106!Fngi#?nMvh0Pb)*)y)*vDTc=-^ zheos&(gZ()qsu8d@$7PUhZ$*f-JVjJm0`^R^U|Mx%u9CMAMy;I6DOh8<8No)r@J-6 zrA}3n4MZlJ0kxSl2LjQU1C966SR%Z z>1UGG9I&nvEWZoAJ$`#P1{eQW4R^5FW>F>->e9-dYOy)K%{q@I)fAcP(wzptU)_tJ z&9#CK+7c&EYWqh=^RD9&z4Y3va7SayB~hpAuCd237$+tUx2Rj{({I|aZs$C6SjKJz zD?>1!u{Bt{P<|C)CBl+&8|0=wM+*xiaL_vx#(R zPyM=mWmc9ZjI1VTR#%xZQ0QIRZlY*maq=#!(_}|UDw4^TTjMF!;WnP8Za84TG3uRk z!b@mwq5y9GocX<*VxJS4Dl<{*MrwlKv(IUSv;p#?i@r+yI?Lg0+=vL44#wmMD3NU^ zCuQou0`FQvV8{U`p)9&F^1~iUyCed^DkIdj;7O!pxi(D1A6PFo!2d9_tumqUP>1HR 
z9GazzQ0xetLI0Ks)JKHao=*s1=cCB`;)5@54fogMJlB`$O^-Wa$ImT)6hi0k1A8RO-eMIw^XXQ4d$LUj1$n5whR$zK*pL@i zo+%-vY%oIRj#xVU(fyIl5H>%9Czlq_STr6^J_U1kFJ%GJ77$0UU1?0EYpAw0otx`( zNt`mVy)@8BHF;n!)^zTr2hPRsfyOAQ)!p}=46|piUQMCD8^38Mgyv_kG%`K4JC8^V zh`&I~^N?Z*_@PX_lkZ*4b3yziBfw$QAeUQ5@rtJ;J$Z*=Vqdr%Kf(lyFChbD{=z*D z`lsRdkP4QALt?fo0p8fmhnTMEw@MhlE0nW3{7zdL%|%&wkW2De z;MbF$0V`V`v{>v0=Ekqt@Jxs34o7zJ zVXmgYz?`bleI>g~qWTGeHWxpO^5@HyMc{XCWiREqroeY+L3#-Ma2BXEQ6sAxKXEIezN)kDC#tQQww#7P~~Z zkN>E!(6Q%0qMpi&$8h2q<#8?bg5^bFKRJxdWZ`Ff>!K#OqlWwux1`6@N4iUGR*ydj zzry>^=%X-pQ|0P=T6UBhxbb!`R%S$l83o>4udxt+zRyG7RzQaS>+ga2%g-YZxEqW= zTC4mXtI=kmPcTW_32nSBWsL_~plNj!1h#5Z1>q;G^3LaSbCq%0k;l`OJO1+og^dc3ME?o9|sWb}N^ zFb@?@XKm4ki1V;<$zy>f<&{0`DE`83hn?RHd6xnB%G>JtqP51?t&_g}sre+t7fz1y zag||r*8$%}!JFnOyU7#5rC{X|>pxeQHlc_O7YljvZgtSrZJ+i)leQZ|%OI&dKpJ%R z0;ZybLH(T-F8!Yp{P6?U5o=cw7nmx|1J6Q3AeQ?Ny-fN-Y}`r@2%| zzRCQ)Z0hoj;xJ_xh0o(i8sakW4Y2qZuQy!_`uEso#mpqt)}Ig zQg0bEzn34R$D4p@bI<|T-Ou%KiiGtM(I1*QtT+iY1GYb}+`=fTfIPJ7#vqmLxw5+j zm16mM+eW);JPkiD%$;pAYQ39Cv>4(pAE9nM19DLD^lW3zbFXK1vpL$ir_AbG9jvQdTr^a3t0F(49rE)&3UuL)h zKVdYly6?(C*0B9af@)%hyZD~*J)dY}H$xGI!dYp*L^;kClbqyz)E4QSW%|z&Z4`>2 zch;=%91(HuJX5~>^~IljAt$(r0z7YdyS}1812_79A}1#=4)lgj5N0(=J|Q-*djk;C zi#8>l1yF84U`LJf?>4u|%Nmw!Ac%d!7&MO~WM(OOtT03IHb5SQ`EAc|RBuo+eG z3r_2AjsrYp<`g@mTDu?Z^@;iyCVx$@QCvkQJAH(S&A|G9sA}xHK@+4Fh3y)>FV<$S ziEUxwTtfL>>Hj-F0Z9NZS>J!+_;xx;$L&XMLRYa%{?FS-GC+vJJ3t4EZ^c zXf^v>b~{+dizg!jCI$t>ZTCUzhch~)r5Bg?iyH(uxN{iHLi5fcJfRS9F|sxv5!(uV z#4ZyyR*NsA}AWDg`mSJ+a*bJIMb<76KxfgabZ&l0b`lyQ;7?0|*Fo zP5U%0=$HKGf-9CV`U=1)s(KfARIl%cG?`BFuzn`kSv0NREmOyOeLtXDfdgLrq~?VS zG%G~3>URyn@j(cD+dwsmTIy=_bQ{|qK7yoXX5%=%U_&uh<*b zi4MLz13T73ExA6q{zLxZmnX@rw<=;RE9{?E&@K|*7NNAj*0N1mB zJY;GXe)+*t=)XRqqc9j=?D?PfDtRcUj;7|V7xuzBF z)1j8CGFYFq^sCo0N@Q~#DA;@vNk@FIUer4MH67r>duW%f%TWc`>;yVa>#w)MWy6Rl zBNI&|G^VhU%hGneC&|b7XKv;t04L*!delP^Kvdh;=z0>%PM&^$MV7Qct^}M+;H}zc zneR^erW{k@vyA^-ScrVA%cXembl_n;;k1*$at53X%eJBA-CeqRHt_LdWpt`g^J}dT zf@R`sOCkc(rmgy^tbLCw*TDglFZhN!_uY&dHdeM$B=oCdf$eKW 
z-E$o`5a4238dEyL0uDK8*pH1xm9$_)&2N@%?~nZ%BY1UXCoznm^AY5hO*7mA0dBo; zaK%oJy9rhoxBg*FK%&rM@u6k>kFw6{WyY5A54Y^9OA|(pSaht|76>UEb@8&dJT`4C z?X?Gr1aK703u8Ap+L|YHRufvkSftkaWB+E~h!2$ZyOzC$6>)AS^Kdw>GR=?b8Pj!z<} zui3kJ0HY*GR@NoJpRg@C*PuYQT>2h{@>vWBUvwgC-tbmdqFJ&eOIo2?UfV(2rvo>3OI`2~PP)Z>lI>W4S z^#T#hh|=o0?cRJ8`sZ>_42ji~;}2yMqC|Yq`_<|eXr@+Fa6h~{IL$5+LY&h^T(WY~ zA<*}x$?RVD{p-^ZQy$HBWDBMFO>wUs8(Uct?nrfqQ8T+Ve8j=(Msn_+z1OZLnY*Oi zc6$0DC>$y7=tRJ;1)^L_a=bd5gO$@*B;)ph*8EjNAErOxI-6SA~=Y z6aS3}b%7B9xruB<=Fmx}#pM zie;qf2rjlD>`8o4-%INi;Cu5))(JM3_|4KLo{S7Bvke|d8MeVkz2=!>zk2w?yaV(= zH&kR>w2p>-$L=~)gCR;83SUx8`AWOKW?*2UCZZY~LbuHwNcYD{NSj@2;^aDo^p%g; zE~bjGWe@iK4=ycp_YRe{g3GwN{$rms`7m}~-Y8Pn;5I7WU?H&)>x+4k0HFRL^q#WA}L@CNFzF51BxY zNAx&_56eg@Wvk6SM1g~_HJI7)TOBB|L4)4I)^GQ;jRoS6XDN55AAYw!mA0e=Vf7FS z3!iRW5P=@}`pNbl^>eaNXUXq~gJ4dC-Gt3m9-gJrmA59vnICx4AniSm7uaL>$;4yf z!ojPr8_AGspO9*+n(mu+r*ihDbds0eL=Yv|y-M@Cv%|EXdCW>f>bBwa5eiELp{3vK}NFAd1Qm0kCb#rDQO{nU~TkO^I4 zwl6nx={EiGz%?x$m5;<}>$gjN_bOhh@PZ+DUThWhH?aCDHeGrvTH2>)RbF_(+yPK3 zHm}W5H8|*g!kJG7NXX6aglp*(4DC}AleK~}rd0Iv9-lmt-@78AD|c=@O-7t4$*L%i`^Rnb z%0`@dHPAWy(IWOhc1$47Cr|xjU0WD0>0lyp__PB*r5V`5(;%8>l2#?>G_Z>MM+@Iw zaPhIhj;Sr|9FjD>tK2NEMKwMbesAgr>@S4ND_1mB{)RLGHK74aFAmHoNZSN1yJ;qcb1jIz6J(51=)@`6L{W!SUMv_VqFv4sb#4lnQjtW16u(uZJ~eV8%nR zaR;!ik1^U*zP^fpYr_TV20w{A74X>Eh>p%|JaEl1UAOiDdtX>GE~B5XJ%**&h?9eq zl!KVtZ2k+c`KCKW=)iz;i4y8EqgH{xlC{A@9|3-^xK!40i^KiS^AK6QgzBMS9uy$1>@UV1YDM~~z206MzHlOdqN}rovK7u$MO3o>swkpx?)qGw_eq+)cl#h`-PVeRMP7mtWitikuScpPZ>-#H<*F5 z)8)0J#S@F{tovH4d*DC8gxARV;9Y*-9kq_SM4~^S=ZM%PY)RJR%pN)-QzYzFcZuC~ zcuhh)!W4K|C{RQQ0fn}qZO`_Qe1{byd?P`-cIaz@Pq;|XO7I%gu0m4l-+_&R1H1@N z5E^N!VAs+V4+r(m#)L$OzYyd8=Cea22|gqdi!5;AUw{B3ce)?q8#;zgmoUi$G2I!W z)@SHXc-u(>$#aaSHfw!$a(yBO!AvpNyD5>x)F-&fMi9PkCOzd0e*cjEhtm!vjD9p- znD~wRJAL6HO%x^*ccbnHM>$P3vE0k~SWx{FanD3OzYy{8BUe|WJ(B4AEY{lB$N6=8K)mP7OZrHecn@`7wirfRqvujO7i9uAD5{V)hi9-WL4RmsW zqIkrX`s7F&rt+S8T7etgL^Q=NE}Esr-aJ1J>%q#kva^4!63pt#MkN~wSs@^*xxopG 
zx1P*v)pFA!29dV}rj`yHo8lDVbjMkv5akxF?)UJtFrsdEO{cE^?yF?~W#krgJUV}S zMtkn(@A@cGjlP_Lj}l^yP*Fk~v8(2i1T$bN*{j8IsRUNgl zyjreXLyTyQfU%Wq5!HHOapLwR3uc1N4VbF6R07w~`cd^WM0n+vOyA)#EOW_aDqA}) z*^7VF$wnLa&L>U3ct^U4>Wqgic^z&$_tT!c{?#vTs#sxb;oEVe@}5s7-xK)36VBUA z;;|AW_b$8ewA7j9xDwN*at%VU{<7|9h!kr?6tLY5__#XV_P%yswXJ)3T)*ov7-Tr? z6GH@1kk9>dqOQVgvrBO0^LE<7I1Vv)DFK_+!J0&|ajyl<~Jl*HcRVlAV@D655%z$fVQre><+ICF~Cec??t zz(uc}-mxs*BUr828?sq-lR4a;`}|Qfmv0O8uH=&BORPl2T$0DtL8#F-HByE{7)~~w z;Wzl~fy^pjj8pnzM5AuHQq(u()pfph8VPHxXu|m2W8@Efc8?#0W+^lNJou2v(aP+}xsbi-&qEU0eF@9-W@-Ke28!Uc z)3B3mp{MA!B$6wzqkB_OWYv47!|#xzD2vF+gfTdC>s||!BYmj3T304qFS>72r92Pu~JFg(c+sHqBDo=a}w!g14jS5Kzw)Uz0 z_Dopk=v{Y4O?9NN^^WBY=eGcZ_t*XJ!=hztP!Np}FtSBY_#|Y|^AhE}h16IZ@uM{F*N$M;1D5hP#VosugqQ_?tm=64e)`qGNCDQczVH~RT z;Yx#(%ya)ti$|sj-kd?sU~h`Y6DS<8qs<0xZRJM^CM}v|p2F798;)8UkOP;s?;qvq z6(Z^5eq6?lO|Ywq#*oH?I@NN}v$2}dN7RO=J0~&mXz;BJ^A(?w!wy!kK0f*CLIR`g zHR4RDMq-w`D{NV-f)k^FfC5;ebLiE@VMBpp^2eo-H$|bX&c5kuTb!d4&PWFrXi#>6 z%-=#w40cEc$yV{6g-at?0X@ds)7d$>T3lakew7vh<%%AkXb=*$2|Gv@aR!~!g-IEs zk)mz4$`DS?qzlRO!rt$158ZN&^Lv%p4d#X$BtzZyq&am6*MPE8|JEl86JNMw*yrz7 z;+dR|$I}+gzoSr~ePWQbqa4I2@i5m<8+0DV=!cVGR^|5i?o`rux?9e>gKoK|p+7Z6 z)9UPUNJQ8&*U#zeC#GCH9|Y@UPlQqe)Mb(B?$zWSoaG~(Cc8z80^qwvb*r1Ev37Kh zylP;Lfc%2y6_1H7Z>Rj}BF0iP4|#)24sS%rY_Cv7LrX=Yr-CBv^ zrAkUYuclH;eW6UVx^wnWII~J30K$zELU0DRsh$M%*cChV{L~Pli$1k}cH^z}wTO-W z4Kx&sG1K1-yup%&=*Nc6vynapb4NrVHpn%=xeW*>y;1(^PjIZ-z?*_8=_E~+Vzx?N zYZ1ITnoPgT0Z!I+Uu)O@xqC6ycn!^l+9@*6OU;(z#`a4ytta-r+g|2IR5r+Z%%D1Agld9#2 zu$SW_n~!}4Rg19onGwp~neDmwaoNol6L1B2bk8_RY-}WOPq}bcg(>F%DsF{ z+r&Jxzpu#xEm$;^lU3WsIkp=5%_%W+1Go7Cy1GY640x-1r4(ezQN2b@wpupWyZJV3o-*C2xUdho02W3D696hR`0cnL~jZ8tTXWGj{x zqeIqrtAO*H-5-`*g2VJZydfTsQ6#fp&S)RLQZVy1fgKA$@(VUW?=n280WX z>T5iv8}{d55UWN6hy_->@`J}xqK>4QSdzN%7uFC_$x2h-`KroIcsXqd&2~+b)bY;~ zbtt=^P1^#tYvXttzZJnUe869OpXPsw3Lk;l-J!_99DGav>fpr>Yb~ zqdvpfictVtSR!B)%eS>aT*Adc$V`4Urkr!Ekgg04@2``*5j{LbgO#EbW6@mzbLV>6I`gVP ztq+$YW#W!nN2qY-2EGf{=;C#Ramo@vh?)B8Cr(G=tdl-ZaXeiMpZSb+hZzLLIj^Q&7x4XA 
z1oMm5CMte_9wIhirp1jl>X zKn*E(*scrnkHf{N4+KvRWN@h2LI3Gi?t`c*F=+&JS746@lu$N&zEF-nlvk0Y{V|}m zx~9RZ4qSI%?ZsHdOCli)-kfI_89#FS-nILA2;bBpgPM_H@-jAgr`rj8s#U)6zMV@+ z2{*vL16X$mX7Ri0#Vq?Z9xN)Iny=gX!1t525zOe&{+ur06ht4kjJvQ$4D@=96`NMc zYGr!utxYX{5h<=8EV>0>kO3qnQ+uqv z2C~uGcDqv8Ap%RVB=Y0lFvpBAvO3P+gQ}WXkiT zO`~7nGFtOtHTp5C#A1Os$3#;)^Pel!3^qSoI0+Fxw1G0`>y=q6-onnLqqQn}ZZ5o0 z{RXYUd4!Z;_O75NyxY-b=F6R;+Pt6BspI-?%FRn(TwWtQCu>4~=0u`}1ydh+Cis3= zo;HPnt-Dn9)bM(RipiimLY~IWHRzLpy->q^5r(kS4xlkyICE&XTjf+{J<=D@bnW^c?GyELPYd zMV_{1f;aX4hhE#yopbRHvd~vV-EP|q10F$NBWT@E4 zbgp7vk(|TdECP}5Mcp(stpfXYvs?KSxP*{vA*q>H%x(7gTL7Qjdx$!s)9}EcS8U_> zAoCA9rMg9>=z|0+`MpoG8MR|m`-}8pz)#T66?Z~Xz4Pj?KXqn}s=DvVnBZub4IyVn z7UN5^hf#mD?%WRVN)GU3A_sMfnST>`XJFl>Ag|AQr~mE+YtkV5`+~^5Q_vXJ3<4#1 zg)e##?+3puHtb?!C7`c9#+GuS$0e72XT6~6cHtBwQ0!VI6r?#lTEJ}=?f7o`epg1h z`&}GIPgqR{{v)eg-+TJj9BwJaD`@t{Qp$LCQ<}T?myL!c7NsWPUC?w+QbEpRrav1+ zu?iC#IfLV@#Mab`Gz-6S{Mce{SSLXT#f@Lnh;a*jHXLh0tOwwr?*ae^BO9$#?(f%L zDftHi|3K{~;^N?O=N0hg2m3T(j~*6LHdt+!xK;G-zoYBMhyBn5>O3_@qJiHHBRRzj z7N`0fW(`3})cR}KsDan9-fvHl$h6*E{qeu1oDh)PeB#uGuH?IFtORP*d*O+Q5c?Mt8>TF4#k`E|f`j037H*eZ33iY6qG?56gXp8-`P6ZukzNY#7GP<$cZmIlpj`1BgY5t?l277h(Q>!Xa1c471?#7z-r(P!`NNgRY7( z(x&f045zUaVSmJmPR*FgWAa`^L&nguLtRuvxtw?yT@^$K{K1gf2KOR2EMHp*>MAWj zhm;QtluidyO-P);4axeo?ZLuKnL+kn6I)M6i>Fw}ro0e6rRE~&!KESV0l`dbYQ8Q3 zwrYjL=yf+Gl4A$P#aMnE3Hi0!zwm%eyY{i}*l)4@)EC18uBt7W%MH=Nc50G}o+GoY z>g@pFMN_c=IDpBuQLxtI=1A<3v9SJZzJ|d$w(2=vK@`CUvZ@y;8cTgC%8Sjje0B)? 
zPufwGpz~szGg|2*pAwrBzU!)i$$96Kt8W>(5ufHZFWg>(V2+i?V7tGX;ynbEa_^zk zM(vr>)5!y6*)rsAz8P?HK&-*NuO4u_Wtn#|Hua2u5WdBg)+dkTw9gV}eL;QIqg_p= zRfAgO#3Wmc;rdGV-e8kqW#@4GZ?(OUx~unk!oTgYzt=EX@jgla)*@_yufP9p0A{(vw)l`}QFbhA*X0A;o2VDUpBewn# z5tHuDrg!I>FMeDG2YKk~$6Rs%ODA|24}q{PiUpb{na5CMV?{3A_`YUN1ZD}k0kB}2 z4HDRO?W5|3e(c8eHsV4$>sn$7%6(eefdZg5mG2+_PUz)Cay4hfv=DM-`2-5{X zG?Y3<0`DjOEa{zEdIa5|U^3`XaPsMVQ1)PZJjlWF-R$hw$H5B>?}OPwg7 zQSSJt3i^Uk$(DR>T)r_$aY9Jc6Y}w#ebHg%O7wUq-&N$ifbofvxgIPEgx zqrCw#l|;XyXIV0oUzU7~yNJXL76GCML4<72(<@=1Le?J~CU<}=q$ay@$LLg9>r069 znvSy{+JQAg@4)RginZNLZKCx(rYwTy_R?=&)!5L(YQc~>X1!T`D4XXy=yaM)cAF(@ z`)F{EiR#bfF=~?E{HmoeD8BSlN=+0PIB@%xhA`tv8F`x^tm_=;(m`-zwsrDsz|{N2 zS9CLv+K%9q4hv*wH7^aq&SK{UJ|UZ!{wvSUBlQeT23D?dDi5f6FM*{fpw-)tz`bWA z8t!Dq77@07gWkJu_u>92-w#4!MwAn^^EWVLG9wz6z&}b17Vc{ik(|qkAXzl*zho*u z=!HzBZV7a1IpEw#l2c}bkQKz?Wncer5lNFJOAEY?D^8O*xxE;einpUKhuSH z3O2zFUiM#L#?b^gAtaU){w{21&wDMwE5ACyNJ#6p5Xb1INj#!-Q&)JVAV#z-Bm;~k zS0|*y76tTs-W|D)s;d(Se|V8FEzf@uW}(u2z>%+yq@Dw1W^1v9SH1AVbAe8C++Pw@ zSkk8Rydiw@MZf)EYKNL(rWu@Uw20H-OI&;UIO+Yz9GW{=#)yJr7&#ov=k44fY?zBv z24i0Zr!+YEg~MA3^Vnl4_P#Q-G=b zS_Lx^=JdDB=J52Y4RL3tr;bf>0OS;bq#~#7H|rhpyRHgHReB9CvpwdqsXx)5_Mj@s znczw99SgSq0*jZ$Ijk7y^t>Tky0xQ6pyAMR7D>7ZHxfYUjX1+z{vIUjwg~Q>2Aa@K zgFTbu+Z4Iku&aQj9+hbl+pXybkxiWCDPaG=uJK4!xy9{VUg8E#FH>`!GT<{T7uas> zpHT~)f_QJw%-B_e7r$L4-SM2ME|~|C2HwQpV>;d=sB9ISQUo znp2s=A`j(5p|bwIW;(QGJ;?e!?QFxe&P%Z-1a`esElBP+M%EpUb1Vs%A`Q&Nw7&0b zAkC_Sl642E+o(0^QL1)ZZGX_4%y9{}xjEBW|#%tmvmR@d0Uk$ykMhA3X!W)5pdGJbiX{ir@TH9oh$V zfB>W`Tv#AR4wkJcEcuUH<)n%|A{0y9yFp}UnhHdJp}XyZ@4CMyym8F{M$rOdm>~Il z`}u>GNY(1WIg;3tRq6@ z#kH!=*d8G24NeeTe{ggWMa=e8^PBX54b`w-GNnB`k%n4d`gpND{wB3vd5Buqe+h>f zCXNzgi%J3nHNjovDK6OImh9Uq5&UDIQv-H_jaQdaQswuN5-?3=1Je|hWV}p*J!50) zCUy92h4Hv!9LR|})`~Y(VbKX-Yk;7vSx*@STdUoaQs`&(AeG#;FL?FPu0%D5M2+_2 z5gjE)>BE>vaJD%hY>*~P{GYIqACKC;5uJ>GSCC*nCzDQyoWEzpLq(Ek#n+4}1;_*K z4#%8QJ}f@NNN)@63lfB_D;^;CRpy?6yPNwto%MW&RysrasPSA#Nc;R4?vFxfW8k(=K-#zGG^ZmC-3hD 
z_}~X^)O;(psF(vfo5}7ey^I&-VQ)TDWXSbtaK(B3WAZ!wMOS8Fb=H@_e_u8Ris1UWEP8@+dzLxKdK);LJt_ef>yoU9BDh zLbwl3%%}woa*#8ps+IqMsA7FA?hH=E0%#zOn4H+!cTu<)x$h36O##GVJodI0G_fhC z2?+v^Z)iz>KeK0*V{rDMpHL0EHE9H2JnxPi;d^6X3B-#B;N7PHZE*-ZF3n z`xqJLmT4NDij~p#5X6Ishuh`Xb{BjsYn)I<%vDQVVkm+VzT;Y_Q!A^U!TtE|2F&9W z)|Sl=R|k=ur{2D*ILIl5AN%fWGUPV(cVz;B$(gG*n+LaApJ~`gT3es%Q~&YNgTK;L znZx?m(ucm6!Fi{5rlqUNBkSnTfRc4ZlWk#D^ZEO3+W6z~b?r7O0Ei{`-7(VG=NG1- zb#n0HE05ogLkjmTeMM7Wx}| z05VoVkB=;wz(J3GOkavMb9>9ZT|BAJWjCbav@t~+BCF`L(!&$~q9u-}Hl`mBn%h%} zKZyRV<)S3lZ#ho~44vUQPbCG<6r!ik&*?GT$3la3#%$cbWoTkZHwK8V#_zicwuW3c$<0iQN^bz zy>cnR3dQj0%8L&e@q$A1A^w#`qQ4WC@YFuIx;Tx{3_dd#s$vLmR46|cL}5;>03?Tikux6gS)aFc^-V0bIY?^ERPSX(({12IdP}(m zA0tWR$cav%4QYb0&-y$#%+AchpiU;hya?u|N(3?vW+K&KJP|$=mxIL5-kS@{Dba$( zSjq8TgaUnV%Aj3JutV`_tn&f}E^&+n!FbFj@!N(Leusu^^&K4I-_b?cWBLnqFkS*2 z76@tW-Nz4R3(|ID^#r|pt;JhJVsS2CCbntyUntVRmxK$n-8PMxadBSnKiNx$Hr_de zvd_q#;cu7zw^vmtGLFNAQt`k@AsPLvm4`_FOZ+*-Yfg2-3vzpv#wVCP|MpS+Sxi>F z@sn(*{}>=x0Dbd@GPau|N4w;p%5dG)QGh;RZ8+M0f!2dpjb0-Hpy?hGDd$c-&tlR_ zsRT~KAOOxEJ1fS&tPLq0SWf`vJ^*3r|Byt~tY24g6SKNlG(1nHcM+LX;z(Op1Lz}8 zs z1XIp*SYv~3S5QtnG-%^+UaQBwf|I7{UvA)>;)ax3nMX{?$_>0G1P0O=X#K zdOMoCl3TV@mN`TevC^~)6XvA{dIy20;D+%%KF<&jQu^!#>7pzVM!&a5*Pkts3C{F;3Ohm#WMPk<4*=*K8B^;3cZ?w-|$vu>(W+ zu)KM1pi7-$NO9GE)wjt)=7}cOgvZ~cNX*P?&=OBB`sTa{Huf&~Zy$I|7k{&--pr{j z7YVT3cO4+`g9LK{GTz^!<-J&Wfu&vBIK4;m4|UvO`7hL2QrkO#@^7fBnZzt7?Bl~` z;Z{`|lzOO@ll{0<{;=}GNE3E;Dqif{gr?yGoLuF@=l!hOLrg!i_+o&U_BOozjsWn<5sVG36)4c;&PcYq#I#?ynL`D?LWdRq|7$ zU=6Szs-zzPMgkyePOA7LLQGK8+fJCAp*yCJ$!XN##!L~iJ2-67>(74*?G zV-I(Jmw-Bi*X?z7SEDs<4NLP8>e>8UgzTkCH}YnTCEA#P+@jnb=9Ul1DUE&d%^!C9 zj62K-`uugiT&4VAK%P<|M$ZpFspk#;amK?55}ec6 zFlZ-BbPn@j444Fma0ljELh+2~%ZfXp=MkP#DI0zkg;cZ<*W`w@g*Q;nl#F$J_d}@} zAeaesnmMR2nM_vL!Jo-IIm%rqrf(9)>CWq!3UdO#ka?v%aIC}K>!JB4U%`NNBmo}d zsCa?=lUeNvV6Xs8PD`{u_vKt_@*b6{%hCR^2?YRXLA{_h&?(kw%Z#h31;lmso9D8I zngx&e(934&sc6c>BY>8BBI!jDCqyQ9xu<`S9IgL|Np`$g(OqCu{i0(P(e>nm}wP73!!W0)}i1#s`b*~b*$>nJ_Q 
z7L9RA;6l)9Jzg51)NQ)*-)ggIjj4dr(V&dLe;1n`f$b@_tBw-^pnD0#aV}@<-Aol> zyVEg^G9AXd^+cr>1_4u~I9*eEICP`g)h`BSaf>Mm7JY;wQwLaOY!q1Ku2V^B=Adh6 zgR;l*UHx}=SgOv$@;)hdgxG6vEFxOy0q?LO$aG6~XN-RUjo>LOe|t*5FdvWZskgb8DX@ zk4F^KB7fZxRUs~b3ukF(<1lu6cC?(rJxP{|b3YO4l! z-Mn6aZ4q6EbZnaVrl_M*R#44Q@T~sR7QX$49J{Ss7KhOS$xJ%zaN1MR+#182 zl!n7PmZ($=_{)-kGEs#ze5wM{Er9H!09HXs3uM4cSmcS*IWhTeDqg8h^JV7S#!+LZ zk>to5@?p%UlyEcS`emHP8an|{E6oMQYSXW*xElYd5XrMwAVHhw+w3-7Z4!}F_^~L7;7*REO?i3u8Gg1)t79X2b7=yXXp2K^qdY15W zg8q&|i69!oMA=*kV6g~5w5!Ui`g|Ht=UokaWwMvcqv|59!}(FfKkzouhXHzA zJ|<^rF+LP2&~Y{cL}^U&s!lL}3-d1iNHh+VEZy9AzC+BnSqBpRYYcP-ib)Y|Qka>A z19H*odMEi5-xoem6(}%Osfiyi=)cv*O+9M{)!b13k0VfN%RTh>bS^+ysZsb!=f6E( zEw~(lRSHP~+U&CU0y457>=O`ilatRyd4{@#GH98Zy&T*@D8sV~fbhSZQ3RXS zl~Hnu>*}8ZcA1Z9le;K{KI~c@q|^j=J~k_!Vz~Fl5+P> zm3o<;&SFp4D>S(IH*F8Uj>VbfRKdVuJ?(dN$)I^|GP@=$nLIC2+VB$I^3SZKRqL;y z${67`%6npilvv~HNJGtO3mnMyJ1(oXNYNYjzU}h2vtli0EI|O5#1Qn2S7EZ#L5;r| zb4E-BmHn~_%LsnGe{S$LJfaNCl3Wd`2ng%oV`a}6g%)uL?r^@cUyw-KYo0I;V(l$u zH8_UxS8>*wkEXU~Lv@mk1x*H9$K%sXC@MChJ6FpZZA6N@sajwldS0yXuHIq|ur?XK z-4%BP+0x{&>Vo>2V#CKgX)90&WH9b9(nmpqu zb=^s_SMWPSB=7{a<#0~XP2DEQAt1Qr>&2v zxNe@gF`5vD+A*5kM5{(GHu|2a=v9=uqIXuhvM;pViK?F&%vR0m&sO0*AG}|F`wmC; z-37d~2oUrDQve)9*4#QPc)@T+Gqmvi^T>7w33lgm8f7wFJzJ*0Ek^#B^oXCKy;wTXoShL?bzNO$N9-Lb?s;6dDiV0@q`j;SXGHIE?>PvV{McTi^H>bb>mg_I|~ z_F!w{UMKe?)zkaZDiCcqYJ_5USaVwZ!s=sPIYm*49a$iS9twbEha2+FZW{8CM~WbK z;q>kp{?XkGe_~3K&6W{m45npp^A;r~p0pxuosbx%u}ZuCjWYh8=Ww`f@JJkFTcxGt zW~{vn7-$%DzO^F8oaqsyl+Gz=t#e#n+Z)0UzG{^S&9}E8S8FMJp$zpd%e}NlN}7C* z3^1%9-MlyZ=OAXl*KH-F?M*=@=(30K?rgyG5-^k4Lh5wO38b4>;-5z0gyZGJiAQ1NnrvUaR1=oDk!~8mZzJ(u@`dOhslUaZz&;G`hXg#pvj}UZheTIbl1RuXHvO zqdDSOvG;qOW3FOq)2&-VmQ$l6nVhlr&vUE7#06dg7Y>h7*Nm4EbLGD#N!elRdpF0$ zV%vc#4U|TK+jK`Pq$t5-e;mv4N{^btaP z*Q_W&yz@&m=kf~9HNT3^b*InYFJ&~jq~Yq3v1EYEWcHLIaNn2U_4~-X_*mcuoK4n4|+wH>EoA~ z6M20fB=|0`l+Flm=64dSXIVH=9RgsL0$PJLfaWDPy79Ok`)Ep)Fgn8%-hf$1iYjvU ztY^g<{u;4y(kY6}#Af~gfL`T4pP^kgBv-HFJg@Wl=G)cvSepK5z}Uo~?Em72`qxnx zr_d7ot{aT6wXM;TFhUW9Nx%WSd 
z<7|?_wI`SY={2v+k@_#H6g$2HcTKnG zB`NCboBeL4^Y0sx%9^1RACT%W*5cmmM^)SnBb@K$-RRGnmX!MOP+HyB!@FTZL~)kS zJZiJbD*(JuYX9%p*a*CzyDp{i{K5X81iUZChj-)>%T<-Ylj-9iPeFL_4bhk z{BTEH#BadBOQBNn?>&-VE?4{W9a6#fE-Xs3LxubDII9S9ddMfnPOcmfH$fq0qaE*dr{ob5jKmr-tym{umgN~d2q-#4mmNwh zVw6SKu=2ccz!*7#EZpJ^8l|`x1PQ!keXJKEBM>Od4 zLe?o*m+aI?1;(H`;|LqH_2G&xIIIlMN9O0bp7m$rr_qL))b42z1a^GTAQ1d03y!Ck zJ<`vOMrgz|QcKYoj4y zYkJuJ`xtk|_pu^(uW`MOlhX%#jE>{XYoE=981*juXD2oMr$WwftNuLa2#tXKq;Uo^ z2$kuXyl!JcQA@G^B1gF&A{OEtduju*glL#)$&sX$u8p@?< zDMc5BJVb1(E1{YhbC(Eu?tGGV-~TSwQq@~h6Q&Uxx@SDSf_m9YvO3GNqicMnhd_n< z!X2Y64b@kTR+px^6g?iEV%CD%#>IMQv{B4Lrc=CY-cEb&l_U6|=#l$xYuMlXfa2RO zDZ|$9djY!v*2M(&|Epkw{;L74olr!s--gWv-hwBd@;;+l6`Jo)*g%gdCc)JS6YR*p zTHlQw=DSbpBf9kWD4Vv#*^CzJ51)EVRu^m%Oa2=Ld)atZh}b=lb8Hs3#>$KkxD^ot z8orZ69QQ{^)akduBy)t=_9;Og9Uy5b+>!$gtNgS)a5`x}bN(Pg=xec;xI-!eZ9@|A$K&% zJpZzXKdCCzDc5;-Chc&U&u0T8|;Z6Gep>r%^x{|Gy^S;cU%=&(&5`kbQ zuFTecrt2}UFqhYaHE!eL5vZ}`G#b77dM!f5Z-U-AG7dDvyrgz}y#=Y{_(GEFj4GLC zl2nl&6uFDW_@11$E3!Yfb%Up1^FhK}qtf9!U(xUwZ=jujNtXLfE;dWkH zc^1%ewQx&AnQzSOykYte3bCcO0gkwbFwoq~srFoF!H$=p6@K)D#-vhRr-sEN(Xx)c z`kY;gq%-4*%~Cgw-?FUcP(!8-js82o!%rjKM$X?jZ`PYrt9xVIY60sfzZYY zPm9&xbYfVL0cY)m0+lvPne>{Y;EnPKI!hcg0rR2}w)%&PLuK8#WPu?@=1(6@N~ISXz-IC8CwrkPG(P4TBzg}QY+pE#4WV+nGpHujLY;_h61WTn3jDX$klq=cz= zAxdSSkxNLVC^i!gAjWgrtMENF=eO`w;xA;?|SpVJAU0!v0*@@?}; zDw*DKDSHAL+bat6Bc(PcEwod&cB>!cy>Lt0H5s6*7^te)wSHio3sDjS&2V-2_vLAU zyr@;wBwK)C4Xj{RYNKd`ml;Bo@u9E4q~ul#ucAysV{bI+x!tnbONm4LVXC*rTN!unW(jiWDZjoOvaB;?o&ciIYx97iYdW|74AS3LJuTpTzWMcpc; zK0Tsr*=|_=B+s19`u(D(m*=#VtjoXpr)$y6`N7vd&(_#7e_raQP==Djzd0Yd8+a#` zu+?4f1*Q@7Pj+wGXpOoE_)`Qn3ygy`6*h8PAg;`o7q#_RB@U} z4V$^Lx8Yvxr*M53o%$@vX|=QLd9g>w5-hf;?c|h8V5#U{8yPo}?hv9bd3A-!{)+1% zk=TvH@Vl;wv8ZEQkBKYbg@8Zl5^er@&HEtbRpsU!x-P1owOD-t?s|>3(vfIgLZjfr ze>Ey+^}OJHP@(qgA#fnJkA6rA8skeG{$YEd-(SPq#s#YP^k0dHwMb>J=Lb4msmbgP zZGH9=;A=A8aEvfxT(7g>o{1Sldg+Kk3H62iGiK!EL+d&MN~<4>?_nw{4f(AcHPpz02{6fY}~PA=qH z>AxRb?PMdI^{kk`Wc((F&qhu5l95CMqkTwaxz>U+{5Dd?-z0;mtXVV2cqp#LY(LPp 
zajLPo6)zHMQVV!}_C0%5&W(qLdB->oUtXU+hrZ8Tm2W!I>YYLbP7?x?=2X_Snns8F ztjt|9kpAFC`ubPqIiK7qde60|YLSM{t)?8#lEn0JA(A(-j13xq*8|c!bydo;RJ1ju zauK3FxivGWrC&qFUF1qkn3qWY?~~_)0Zlg6{7Nhtv$@=^;R);>Oo#mHBl}R9!u16Y z=LNCk8j)ZLc6TW(OdI5W&^gc>W0F8OmN6h_Yv|;zWJXhVIm0fGeXI6dz+|kuT0Y2K z>ZxpDN&{mJAeFftL5rfZW_r|odu?}1st=u^KhMIVzMu}z87dXWfuvbIld`*!Orp3& zHNi_w8sPM%JqPrGO)uI1R+ zrTuGz4Sb=HKY&h zuAEw5t$+A5{DP4ju_LF`Qjv~^6l~L>{@FBq2Q?|irUYmw9V)O-4!b+&&-v%ilW#4QoBHP8fW0ecVR z79PygOw^-dhj*++OArEN!PV>SBy4af!5QY?l}bXEbvrJ5qjKpHrxKzidoak;K7^By zuCe0hV5rfA(aRBRER6%Wp1{iB_j1ohi~vIz0lF+OOd2pOY96#>nnHWApkC=@eIjYC z+YfS8tD$zo36T_#fGMBpiHM@qbaGu~jP~#Z^+JBh-<&LH|Ahb0IuQOeDBFBpZ3K}7 zCbeM)8xH%P-|#`Sib-0hxdTzJ#>b-3L|V6F`DBzNS~COLYX^q)HvvzjhV{;J9Qs|NsGFmH zUGelYuoAc2@?B6Gjgv9U8`!@~vHl8u8YngzGOkcQ`QOFzp9&&BY`f1gLa}!mM!eDa zm;Yz)E_8+XTb+*_7v!N@O?}P>50Np?0Ut$;9F=AQ%gs^U)?w^CAHD~Gz<_510O=zD zkT9VYRR~aT@;qW(WACA^_d}DG5{;zox^#Em=96@T#~R z#bFJar=)!ucij&L+?#&}ru}bjbnMKR&GS=7 zQoAsz9*+-LtidHtFu&HxU?QFTtgEZ-T;|7^+ad{Ww z6uuDOi9izVK%@X=l^0v$`xPbx5X*mxgD>Efd9DY3PlP1fY_8cRszwObjRoi!p!+5l zVd6J-*IT75KbNSK6fylOEb2M5Py9j`a~wufV0xxUw;t!_5-qOx6sD|qx;&-Zs`I%X z3UX{vTj(xv`*-eMi)QonR#7$VQ=M2wxa=+DlaHO(4c~t%uJz5dmr%v%Ja`N{m48q1 zc-Yzn7UI+isF6ARC2uH__AI?V(T%O3(SHgpF7w62(vE#%uW0gsgQ8zppzQl^-Z%r5 zm%S88WZ!WH&@dH~JP)g~Wlxlx&_2UVR!^g0>Bbm+*y!)jTPjkdl-FR;G&HXC3HH#c zK#3%f3~q&J8`{R5L84p#34vnnIKG8lP9`<0?C)zO@agKw8FOgJz#_(ccN5Gk$X?;~ zNw2v1x>QG}O_w##mihV781wQy8L5MmBy|6=q!s5EKmuu}5#|rg$E?PS=(J$BAu2 z5{sJh&Iom|jg88L%>@WXAX3n+Rh#w>kEtpLS9j$9XTu8GF3ScrFI;3_6}mF?e&$Fa z+{D|E!TQk%RvU7S3PKvQsw;bV*!X4Ku$iCOF-BKFmm!q zOMTb7dGCLTZnBMW1dB`i0!W$od7jR$s_p+f;ziqYDgT})lZ~cw^E+APv(tJ`{X`^U z{Gmp`U-w7!SHrBf4Wl^$@^?t(cR4F{7(Jox4>t5a@nEcwYqp<=Y((7rrvAUbxJ3v| zO9^fiQC(x*M_njoXT3fX28(9q+m;=fCB&;ML^MFeG_ku}BegbHoH zfGW#3{NErkeqUxk)i!+1&bLENy#$k%h=*I4l9>%Vb#;mscf1GzE)lz=GUi#UBc^yE zZGKHx<~OH?HaPcQq&V(Hn$4c*9ak`hS5!7kob6-)Fs!qPJV76+A>2aO3BTI4Y(0tj z_7dfoMqI6ZvOpw_owkUfseg&vk&Vn3mS&P!jTB$cX2?PiWn)zMNKD+({&4wm;IvX; 
zG2Vt@?4z+7?*E4q{@dm(^QuE)Jau+1kf8T(mHJ3QPIt!nmvz%T09VlE4$bVca?r?6)^lVF7`0lgcTOd#06t|XfC~F?Jorj5BOYc34yYRHzM;{N z>;hkjq!@F|5}csCWZ0#qk=Y_L zF#&Ibv}t;;b}$eqH9+kLFhDdg+E!LteQjAorgA*9meP@b&6 zOv7@i#XkR^#B5>EkjVx&Vx9S#ISv0fG;AGz&K-IBs?s9mKQThE`}GH9McTY+h%09% z*d?wsN0UMkUSlU}oc_)LsHevH`DXj7R7S*Eu;7(^lYInD-G5*Uz3iCTVR)e}Sdy@Y z4F5`CTk=iANFMk9X)7HwU%))fc-D6dhOad(@3oj?Gd>Zy_x=-s#dJUf-cmsw`2Y7e z1F5M9^;RyJb@5!mn}r`?7d?N?&Gek7(s2K2HJ(7SiGuMYFD6-gYTI}8)Re(y{-)q2A)PQO0WwN#cc+w&TnU-8nQ&H z6{?)}x&JLw@71k-OKQ{4!cv1mSHy2;Zmxxe(UxrK$Pi2vZoO^wRXw$_SCUq@scOn8eUx)N8X?Ptc?=0_-G59^#ao9 zp9um{_K2@&8WC?7h#WFi8qW+ISj3S`p|t<5J5UoNxhgX^$`?t@Yo3zGrs&)WS?7cu ze4&;=dx0k?n}a<0jG6=H|5+ME=u-Qedqpd=Tr?kz7zP|4eVk1@jh)LO?4An3<69VQ_PNkE4lOTDqrV8*=Ija?1n{hP>#Bv{z~rXZMNbM ze~w_0+n;>cK^wTDR^@+Fv+*f4mw?m^S7~1sYEas&3WOs};`l}vkZ(G}$YTT1L{S6; z^S$*{bw!K$$bhLJ$H zu*UyWWy9_6H~ii~3CKTcz7niJ8xU7wDGuIl4$lP=A8~~84|7qNtp+i_4=9dYLBoW) z9nm)_KC?(UAxW#T7F2830T>Z)iSa3Vu|5uJQs{rSaTbP?rQ0+w)Mf56BLK_#j6m${{%! zc&UlyAGHVj2l4`d@1MDuht*vgF-$vN1;@3h-5UVJA%FEzv(Pa)=B9EQDwu zhb`iN7lL9wv}h}^Xq9EwEG! z3Zl5YD=y9-GB@EG32h=Z#ur<@Dit%A&2DI1OQ#T*(P%?FNCi&{h!ws&9Sctyng@E? 
z_d`I8)OWM35Z-J#LxN}>vkClMUEpwd*ciU-sT??!2Y7i^0w5o8u}55dPyQ=&I;j4c z+!`agSoo}(Creqhj#HT@t&)j8f95b#p_2t@$iP3dVWiXhVFgkyfHNWsNu-U{^@0#g zBL3Sgmw{y|4AYQ66m9=6^!W?c4#eS^ucsLNfg*kTmlLH|fTCVzdgfGEF1#lx83}CUH}mD~X){Q}x+oXo zQ+28%nqhwd(v1P?$W}qq<*=4^tL2-p(>L$u9J9ila({skKzdP4Zzgj?OgTHpP~O|- zR=M;^C(Am`kNtO&S04O*-Z~(S>v0%^?zeB{9~VnV+k9n6+wo{n&vk5sQU?0_YupOxQrf`uzfW_Tzs> zES{%+rM2xiX)cGl71Q#|z#llWKGxL-*&7TYh?hz{`-`mSgMqHaUr|3;>$Uiam;(Ix z6V2|ShaJd`!@LHhp@bp->>@?sFq7w4fBUR5dG0T&>3HdE&{cw1efSU6$y~B&oJUH2 z{QU&^!Dn;2L$r(^Tm+Aw*BpO*i=Q)zwjCt zWo)&nn)y|4R*@S1Z#$cYzv!_T;i-26OMw^qciQ~;EJ@f{NhTY_y)advx*YCeqJ=YHq(%@I@@r# zPVN9OO>Pw#uCh~^-pnB_WUgL1miyfN-Q)T?sV$@XL4bIl0tj4n-zkcB7oBHjW!qAX zCVK#WOb+13J}`Ln&_vEh7&BMJn8C3%qy6%(tNHyFHcPH}_ria@2si=%hs)Z3sVIop za1U94by|>5UBZ(KxgfmsU*tm@#e>^9JQRNxA| zO^qSpBXFKu0rp!!RF$y2Lt{W_r*5^ND%FJckS0Jd*C0>Ovs&$PXstctVflw^f|l*a z+fSQ>XUqwAfu;)#36XI$(sgF_h3I|(*OZqj*;(b9UsgowG@Qrkw1SpvE}6t(Yx-J! zu7>3G@67#LMg%d90=Jv=Ha&+`E&LZ*c3_HARzCxZ>9b<5lKF31D-{o%T50Lqrp>*0 zvg8d8-BB-|nI5vSsY<09p9{+6z+sJwz>XTNv(;m%jCNs-%fFL)3m=S!%DRQ?!u4Q; zZHvPWaK_XU=TSsgNuIFb>>IyXt47@gG+e29zBA=t7@d@SZD~X;02`@a(!1fA z^o)RY*JC=1P2Eur$22P*dgZd?k#q2)r7slGMGd^fS59D+)k<+AmL;9-QVcVFt-Dms za~O3>$&jZ&8eyN$G~#@=DTIF%%#%n2%oL^OZVJ4u3YEJwH83*Ih(hXN%|{}y`XhcA}li2Z+0cWitz$& zIec+0d)kWuzY3zLn*ycCL2?jZahMYfz^$I)Y0%N9{*ld*75r*no`-$zcX%Yu6X*8sScjd#?#2OJyBc9ADs=8Shq1 z(=`CxiLr^4L}fuwKAOX7gNu?TO6(`xUFuM~jVJ-1cFO{UXx+}*d)+)Px;NXIn#r9)qBJuwVn0g1eFq0ex8`<2{)VB0I2d0r^QF116a428Noy^#7{R zx5kg+W+LKp-HtOe?aOnf)6}cn`7FUa*B2;_Tn|J#=G$c*>${OJ6 z5vQ+CF;b7o*EB6;Kbh>p89XudyCT~jby(J&K?W$&an3FDdmar)U&=9%Li$D7P^o$k zQP0RO@~i8u2E?gmHyrRd+E0uogTfDC1K8rdgTmDgPPu#Qtf&k*aK_x4&n>6(;U~8` zPg!K1L{}U)1!6R>wf%J!t8fq?B*`>TMmE(7`(9xprH-ZxH%fN{Xi)l^PWHTUT%W`F z!deAfFdbyl2-vM6L;mH%E#1#rIvam0L!XVrn|HM*!(=?ofB&ijshDr;ipJCa$eUS3 zuY_)R_*-eY!OIoIPn-!(J8*11#)x>jxTbj-<1e+cjT+JWy4f0D|Qui$2X}z?u$Bd&Sp!x=Wc%kInH?d@4o9h$Z zRRj+7k7R^ljK5N-6c;?h9`*~EJAX$p{+f?nm-=xI?)1x8n&SO+c_i$6f1R_Q5t7A+ 
zd{Zy2hTiv>9)h^qEEJmVJU{F8bC%9kKj@J&()}=5;-00pPY0Vei1|`{EmtbO|4D9^$GY&zWVU zS2SdYRbuk-_t@H_MbHxSVdcg=l(Ap?faCQ3*OB35K6pMX9tNaqr?l}+t}M+|b?0MP zC6%b^it0fl{c2-znddsXOv;}U9l>6yqFn+j`c$8_5(@B@5`mj}{LToI(k$s^&O48h z%aiZmx0wzC)Bs5=$rig2UK%JTd(O0!G`uWQhn$27j}!p$Z>}#K0V~5GA-IvYS;A~k zH$3?9j`5=5C_niv_fBb@(WIJYZCGj1R~@?pxpQT+J71AR?>iDkE$m6*g!UWh0gIok zEYi(Ti|;??z9oN?3-@#rtdi|)FLyH~ZsZm1GS6d2YhNUKEx*zw5G_-zZ$)0rUKnRA zE2d4M%&qIwzarA4@p=&)${z83R3N4{_ivjoQEI-eMrAAVzJwX|w&64M+$Wct@8vgWb0bN#G+S*pm9}F0i+%zn zQ1N!FiIe{k@~)S7mnT^EuGrtM{+rZU9r5uQ z^^J?;;2G+tnV(mPJ_&6|;R)Vl(J7Ji#t|uDmP&2?{A&e!ygF!(U6@I}{-rER8_N%5 zbH?cO=lfY&L;;Q?yp2;_caX;4x)t$AvaEug&x#L~L2N7hl_e7autz1tn8bi)mQ^&j z-Rb1P4YS;!iSCPy^FmVvb7GneHp(78SGxfQkYbQNR7v8)DxJeB{cJe;xRX@S{_Hz6 zH!EZ1eMt-4<WsE&A-N zZrA26>WiO)w3eSK|9#@8mV_!C0bw0l_iyjM&I=vl9h&la@Cf8fR=>4I5g?(Roc5Hv z1OoUIooz}2v~1W?HK0Pf!T3p4s;fdo7?cqjJh-$RGjx^}U?WqFY<6Gm<`AyU`Y#*c zynA^TsQ`QssT_^pGvJB-6E({wd)_@K4^tcmGqiYDyf00~zgz*c(gSNsT!;_6M{{Wu zkuerce?EB|e&lHa$U#s9i#8z^GG9;nm|40~q&$X*VU8N|7#e1Ka}|dTUV++`)j=% zt>(m4&vC;V1>vRk_ZXT|(R}skOB&UEf49`cP^jTfMM>B2U^FQnb@(}Gj@3hA^j&KO z2G+FU#^eAt`~6H`o8oy@Kjmbi*ScNQ$TE;RDe zKJQh0I2E#(R=n>g3OlAs{Iw|9!bEN}-}}y-PI%gr>ah=TUmzJ+(4G=F1~RjgiB6zn9RV&I56 zX1IH;>(&w@`w#(Jf?YJFnW6dulAdl*#^Ba1uPsSMTJN{Xm?ERzi zn9J}O%A{W4yn`Epw>k%4SyF0x2zb(ro2Hs&6+>TtC=GxC>VPsZ`ojaGxf?K=hp3=> zTscI%k1L;(2DgT2SZ9%N$9hL21Ej1MGKOu8_LVDTLe>l0?%fZx6LPmaN!=b7c> zdXb04x*5E2X#D%7v^K8b?SX^q^JAGd8O86OEi%C6jat*Cg=h7;vn0a3_OX=cgUSz;C{! 
z9iSM!$g47#-pDJg%EikDuCkr%<^3$4{dJ!U#H^(LHOryos{v;wTVws>hJcG@tGt!tMkrOWYd z>oqyJX@JK5@Z=hlb5p2~UCI;N*FuSiC9-ZY&VLL=3|stM1Ng-6oU3Fb?U_NZxm&iBZ}P} z16e1OJA~tt>lj$%GmbiG!FZdZ@N{*JDQx3aUVb>YqO{S+ZN~M@D$4+gHO9l1dQi^5 zMa^>WZsp0X&!=;D6&#_;-XKL3Y3TE6`a|Nv!K#xx+x~o7Z7@k)6^}8kV%?pm486_) zy4W!v!Xk5Phq*_wYO5kySlFo#PnZyNJP1EPGVPM zlJk>{V^BQv%>su?wjC;C{OBa}N${C0_BY9AVT#@=qu<<5gK~zHztyTWXBuNZH!bCw z-vs^{k}O&|CJU>#O_bqWu7B6*c&#tcXzrA@G4^V=s<3>f{cxS}>bS}h<2+9uukGTq zYwzM79tzJ$Z2@P&WFy7&o(fXKP4yyUgh}~LDP3MALCh;u!?3k4<12|s{fd{ue@3Iu z$#;>B87!V6%D-NW|Ga&F%fhda(~H|5izY0+)jAUHR(ri>`LXD-82B6ak~q=qRSV{i zOI16sdw5-@GghKf%jqhU1%G@Z5@ayCM9gU~!(cKZpfXIrykrxM#~N)RHF&B>wdOzq zz{9AXSFp$fUpM!hpG!7G8f6}sh?prQZG6(bobT@%&<%>?*oKfiQ?q4|OmksOYwc4t2j@i}I?>kkTT{@UxFO?L zj{pODaHd>^B^O4NxjeAu`RMxTaLwX=0VOzWah2H>QAVNm0U!HfgkO+7f?^e${Wwd( zM2X97S?bu;s}BU3a82~;*drT0AjF>TVxWCuK+%rx!x$EWyBf-Z;p#OS*die<06SgK z_JQH{L)qE%?>yeG-piP6F5p5SrD|3^e>hDqgwRl$! z>4byb^qk>_ed&QPdgVC%ZQ5z2HtGYhU37yIA4|_B^%}@0^QA$@DU#_@*NB7WM!(Or zYE)u-3Smv(NKM9Beqywr-$9A+&+|P+fMDafJljAX*ls)?tq!N7RA>UNP$SOgKA9zx z5-^i2IhNcVe3FrdYsHl6*C+c#)5;O)W&Wdxt?e1I*r(UE!nBuWSP_R z4*_ye@b>ZLk5MSuLNwW(Bnoro(d4!J7ptJ(kHVaYM);`6#dinWEQ4yrNcPfIF_O+k z(*>J9nKj9H82W4ExSbY`j1RhK$$M9Xl^Lly>l+mvB1WGFj1J6mDTEa{xVl{Iwo}z> zIUB1*B9U^SQuM>81z>V$qD&zK$=dn^%b^V*Ou&*^cZ0y5q^hDaB7v^5vby+<}Rb>MXp(e4Vo zsxBnDS!MWg7N!t!wx{7Sx3g2lu=b-$Zk6I)~^%}1;_Qjm=d56gw{t@ay9OR-Se3 zmSbE->tPp|O&whraqWB*>l+aYZuhn0eU}Sr$87bKtg6Otcte8tO*JwlG(WGEmVL?* z)l>^1;Y05FUaZ2{ujnqw=Q{>gdqB{R-Rl>2*Ear=kM@!O2e;C{+q$2 zr&aVKwOVO!>5^xE*kjE5Fp~Dr3M+Kyt(x)`Huvs0cPk{3hG}uL4&Ag{e~8shQlOsI zo*J#%w!yTQ4PoI2;sXHRj4L=N-yU9#f7Zv7zIe4}^GE6o1t~U4NofPUY>#%CFBE4eerj zEjp(5a|PpPGRB#qoniWjBQyJhV_EcROfkN43kT2t|Pu9_c>6zs# zh9H{L_7&&XNyJ}%ESX#|yG?nJp*omM64A!3h~_1SiCjgfk{xg5yu!kPV(R{qWWdC1 zG<*SB_PX^`2^7D`{fL+}P&V6BwMJC7@h!x3)LE~E#x|?Y6&&FJVX5g`4_V;)Ntn2U zyy9%o*^pIfrQwqB4B~U3(c}TS!^^Ha-KrZ4edg+wLFdik1pG{Pz1*qgsLM!A4WA## z$_EvX`Ubd9v3V*Euf_eS_i^{z!zFm&vNsXj)jV5g!F3bEJcz%=)sxT9n?Q9Bqk 
z=GNm>IJD952hqJG?fC+Gt2<)D&MWb(!g0B?5IZ&$%+f@+TucjHj_LC1xZW-B_+m8U zS0y7S5JV7@Tt*;dLA)He7MHwSx5|-b#dcP293@7LvGO_m(i1e)u zxw-#_mc9BVjRqs}mlYF9!C!=jJ=ax&)iim&>1FlwH04rGU7mW8#|+5PZ+S9Q6=gNDCSquMDx5W0=lhH{Kg-bK--l64 zace-YaS{cb0y5|WpkoDi?68=wv~%WZhk_*cr?MVEjkeP(6{8y9p`VQH^Z#;@R|x1ppr>3vshbW|{^ zr=b61x>0Ak;%qW{8=>U!%W2yak&#_gu$0I`4FzR&YQv{06LQQnaXlTf&nvRa1;&f%g5#pPFx`k#4oi#i1jx?!mX_+T7l z4QdZgr>v}_BB+^z&)O2o&G3+T9ByLPOH5} z>lt=s9uH^mqVDix8mU9OW$i$?1+Ba4V4}nZv^QLQ3%>_R-ZB+d?a)4_Mb@qL)Vc4I zZuKBYo9XrBk#w>iva-jyjygK$@8DKP%qG@0H#E1A69q^&8g;}kWpp$Dv)En+d2y?p zr+ANgUeSvY`n*OwrV(Demi%;pbEq zY+aAkx}dYXjZCiU!epk*mc}goGHPxY_4&uBsye^2tw>9fp(4S_CL)5rU{(i&pT%%9 z5qWkCP3k6gj&>N#?v|9o-vboj%_EN4_5S!Y|W{DPhOP%^ZqjNCd!cO20jO6 z&ZWymwfJV|f=|ci-Z79)3swFT$^I6{?1~%g5 zDtu-yLr3GHw&pB)e9u5c3vt>Wp8jck^jH<*4Xs{oay#j0aJE=_Kkm4h6wbjuzso0n z)U|S~^t%SWuK+JPM+v7aV}KXy6DFDt5>XVe2UH*-yYCsq%L9}+kZV> z@l44&h4Neaby^u{v$wF(4~Sr5IL{)^_w*=l7BNEC_-8@0Q#~wjApQ z!f6@IfAuhCr1*x}NPj7$4l;kKC(H3f&5Ye#U1!a7icQ}V1X86zVQ?p$We~RItdWmZ zjvq5S*YhOmI9Iv8B6@3lx^Ia4A|d$y&eM1e-mm4?*DuGvV0VNJJRx$i(~D~3SH_)z zzlXu`X8PANtUW8vnU(m>O@47XH)|_c%$)Ux8Kp|^n?l5t#&;+5bFNH80eMF{ik=X1 zRFn%GEXrpi@VBf7Z{!8dbk@7b^W^qc7ISs!@?Mh?DsvyJb8GkYf{}s06!bFoHa|A1 z%61$kXfzLsHOMiS#Og*VQp0qJ#spAvHmrZ;_UjIUa=b6(=1!3S5b8Qv%ee-UGwqK9 z8_6Ov7RH-)SGz12j$LUL*1t>k)-jKeV?GrmF86`$XAH6}VeJtavtxgN_ ziaKTLdTpXHmu(v?NLXbTI8`KlJ8$jDmB#zy8~jD}O7x^UeZ1pkqJ6U(M8^b(B1fEn zH(ZYQ@}9&y>*friAbAcKN`*2A#d{{AV#ajSLT?d3MSeSpNTqsRAJLf-y_VzG5qa4Oveg$Kde7pA5borQ!>= zvsV9V4$Ayfq8Ao!6m-$5#4G}=f`u9!!#sjTte%J{sliszCY^x-ZwlI2e;gKpMg{s! 
zGPfsdcc`gS^Z`Hei`45MDHI&1&0-O-C`m~dDN#1pz14z=DKcpKriiR&^Bg5b8`Q-& zQ!*jknO@_&kndF8EQqc5H!ZgOsF5&(aJL>5!a#YO4V%c2gY_3g}rV)0J!Dc#+&ruqIE^#X=KpysznBw=3foD(K3K%H7GK|FLzxxfA9Dw0~ zy~Ih={79`iL2RtkhFBKks35(f_q$!O##NFMd)scdzf(@T0(&bWW4$L-+nc}68I8$5 zbd7tr5K`?nQ{sO^(pP|~>#RUsterDD(8U8ocfr${$OSdMEDO|1O4`Cy1=K7-jHwu_ z!-F{q!=WFG<~6f;)bJ;nx-2%tla>A%QAaW0*{9u8BR&g(H0+=+n7+}%h;?sRt-5HX z5>pq#kN{yB*{VNPMrJqrCH?P!!Zb=+HnA@v@RETfK8df|Cypk`l;?~KM?gisz`+P7 z6MyBs)`fyMSkwJoHE`<}eFh{D77SFR2}S%C9Z7uLxWlfVsUl}dDnM`dc4#o{)a19t z0>L4@2`WQ?LQ?U3E^!PaM*=BZTrU7{SXb`TaRA_ns3Lvlli3PJn)g|uI566>k5A(p z=-7ESVUUKCI50bcPTTEHRVZ&-ni_W1_{e=& z#m#vs_6vFn~Ju}?7-nRQw06gl`tXda#p*3Stj+}ohREhlNfP^@P%NG1ZA~Giv!dCB!Mwu52oD&5@zQR(_i8npnt2feA4M z6Os=mgf4Ao+103`K_m5vyB3!fL(|vuc+Eu9pMW?vjQ=!$qXP$I*7`hkzCy9w8Q3#KMA4#vZKSiJS+7_Lg@MjM^eM+3(Ju|qD{g3*e zD{{NBfy`A?_ZbySSVASGRQ)G~!5X;eS;Qp00t(TpobfjMQwCE|{86>NW>@3%2LjKy zvs9t$S;LhHe;);Tm*`D_t(Z^M#Xp(1`tBUv>d@3k_BSayOOpx}X4+{|G@BL&2otnU zH*A92T_KH7=yLm3sh63PKvwYUj6;-Q8``wLwAvZa>MUeHs}(pMN1Z{R4rFEQC`@T7 zM9DBbmok+yop#QPUqHA|jz?DW^FDyH|0sm3Occ4MCrZ$gn+~XR-!Gr$Ki>j!lQq}h z&;of+m#(M+D8i@LoureJ4||1F%+WX*^J$v>S_P&r0s48x^sQ;a5SKTzr@GVvvG3N~ zik(;=vTaE_HKm0fs1ky2mCCe>eHs^BxD_s+OgLrpT3*9ND0<|yM=FACqp{u2_4y=kRIy46T9v7-iCLvW7_g|> zfFysHl74#QB^5G#fx0rnmeqQC#5Z1-DiUn`vN#dD7W$;>xKVP9I#!UXRgw%YhirL@ z#4)i!+^e$=nI;wkS(mN9o-NWGVaHg3ZXY-X@(LNi1tqi^h|Fj2Bmw{ZVXN_ZVzI>c-i z<+ke~+T7q}t*x{h>Yi*~ZoA)aSqlF7-mlDWjHf_eD4isMSG1N#9BIX8uMqLH_%RrW zgg`GhUQnC>BK#9XBZZ$Nmacl_i+LMyU)X{Pf?&Uu;TOO|;U6rd4=6oaE_BOT8-u*k zg#ILCLG~HK`UhK4|0M&eqb}Qe=#Xxbeblj9_g6Oj*A`wM&BP2DW zVpIE+r(cbX5t5g!COSyinS%N$zu10Q^VNUS1m@|G!IEbVRwz<%Z^yv<#@()aJIb%5-wJl+><)(_$m0F1~r! 
za_i}goT?EFLS$C|>BqI)^k^#%;>w-=qzCV7#^BU6A^XdJNQOf?I`iG?_pfXV3Rr+= zi2IAm8qaD!i|l#Pr#^`#NcB-sgoQGuiDdwQ7V9}c3?B&C1iu_sy>Ii*|S#^BEYhj6-##Pz0o&(Swk_G_yj zUH!sWaZA7-V)d1~LzJ!yoF+4Szn57A7f5-*EoeeuHX zS8<`qj0(4#N`YUlwzQ=sGR#ISMG*;oPfkMsU~;EIPtCn$J64HD%QUVJ9uzO5u}f^D6`mk!&R?6l z`RrA+EY}?-7*^iWnRK8Pe*J8!oyW;U2|?9fdSlj#5?zKnNA|xy(c-gqPbh2GuQyi1 zQ*t$Q?HG-Qobz{w!+IFS`LRhXt&bEwa3>hi=QK!|+#uot*$xkK2oJLT0cMDJ8K9-8@v0%;)IvnBoC@;eOu*IEG_gBx&+ z_@KcD%jRt|(1Z*m=kH9`(%^-hqAjEk(>di8C*&@O5$W+_Of&g%p5eLuOt?2JPB-b^ z5YW80b}C4pC#gL@2UsarseAZX)%$2E<7|2lZy>4YlzZ2`xvBnW;k|Rj>y*in#z3)U`1+_BqAAxiKjIS=(NwdIbg7H(SC=6+ns|8AtiYvfCJk&3% zRnC>@2~r)}a=C*(93Sy%;dArw6~*9%_M0wi7si*Nc6N*-_)16ig+Bu7uY0QEX?+d;Ljfi8pb{O&^j6x*&OwqgHykKHH3e zSz+(4#X2E8RlUJ@Lql};VFZzwI#L^Lbm}^MlL?13i73%=6c3Q4Z=Lz88v3Sqo@qo? zJP>G4TBb68=UUowbx)N?`INlIHb8wqn52QFD*UzHxL)s@O$Rtdhh7OTS^kc`J7eY8 zprr=b_eqgJDZO0c_6DB@KFtGX0rn;c_rR+a;8$IkRdkI-V_=t)v{^Ji59f|BqNARA z{zZ+a+_;~0zQ?|PftqoV9uG`;B%)_j=%wyUk=g9Ty*z2|q;#t}S1Z#hkJE=J9}A*9 z?@D`x*oeWPTImototg&Zis)GTuWLROey_z<;&@ua>X zmmTRF-%z^AjN)2y09#XLe+czpq)VL^sBxdGVFs=2=shdWB&zHOyTsLveDC7wV*g4n z|DkEBoY?KSq@P!jO0`h-@$j)oBaWQ1CE6%uvH4&Zc`gbQ|3ZCf;6C4BE=!~uw+~&C zW*3B zG=JD{1<%N6+$29K>t3rWjCZ}LicF*|w~GIIwoz1d0qMN0q_p;7TH^NX8`+;$wUEO7 zFG9rH4FdCD+9;}h-4vK!{|epiVe__Kpczn_5T;tEw+*uLlH3cK9;gfC=+>^rAaR`N zPLCJ_Ob+&7IlLU zG7u4Si}~#IK4(e7Qr^J4LFOq>b_#30V6mROkS);Ytf55-H2iZG@j#HA`O zZCg>l9y|SvhSivdCZ-1!m5QTLC$wMHA+(2c5`|Plo+)kBpNSxr<~{2CWoN9EgJyKOm|p@4 ze(wIt)G%zP6)x`0TWw=MxP-b2dy^Jh&!`YOodtUkZZd`iX@V0`v{NLKiU&Wuiupv9 zFpE()pUe19zPO8?JBShXO152Dp~dL(XL2uvoWH^fO1s{ib=$k6 zq`y&Xc1{kd8zk7#;_I{k4vce9j*=Sxl5_Sqp9Yy_?yM$7GHN%!?|GZVrZy74jO9Uk zqpQ#8`g0dcMlcHFW7$Ij4C0}xd6nRFMps;nu=g6Q#AcJis_TmtXw(@iO}NVKw1SAV zp2+77TnVepWlh5?aHvJDfJFqUoSdO2_|P!q0W z&yNJ}zmValJG5!vA%R>FU3KdH`fS&qtm%jdrCO$kym7}}I`(fTJQ)T?$7Pq0s{KYu zEcLC+0piYcC5|YE_ce{bX3DPCww!s=I@KrFz4}9>#s=D^hI0i+mPo`vX+*M-h~T8v zS>P6IzrPic2g9QsO3BWnUbXt>OydN_m?Z$bP0eT{&Ya}7b>SGFdYJ)mij5O+!howu 
zf7njHIpVdxHTkvGM6hl!ld98#O6uydireA?nH~GbdL~{N>d3mK;dK(iU`)rrxe?0R z^3uMXi_p0QL^c)g6cX(DscCqqdH#F7@)Kj1=yWTrE&6wolYS5C5{zeDN00)zer;Op zkhfm`T(27P`+l=@|LkdQlyK{KS8^~+%U|5J9HC-+G~=mG_*6XY2VL6h+UU1mWz>di zM$`@&D%OODKe5uMAtuSw6F2LMG`>z_e#eNrO{8jm^<$~-d^yk7SK4dfA$&3u2|SjX zs1c+Hm1k{ZCt_FKU}?0EqEF+RZxJxy7hp1dHNMh9%WKoM{8&Z*=*K?Y^Do-$E{bFQ z<#XST>5{9H-RleV^!o;1+ZG_*JvVes#wbg&WQrK6WkxupgLlk(i(d<(!w#}W;L6l8 zlY|qxFLYoB!4{fs^X6!65#O;hxno&Yq@H{VW&6>LZoouZ5yn9hw^tBq#Z{5I&fAI5d5yF1k2>B9q$*48HwsT_wrbU1K5-!C$ zt0!_#rb`yf>jCvnS-+Xxv#Uc({kIdg{xobK#12tzKiI$}DaLMEHNGr54Tak)3n{Gq zA8k=o(uN-y%l$G_=aZe7REpmfT|+xZLJoE+&VmMAh%XN z7io^NqnEvYXA@zMP>2ulSHRO7E0D@(e1StM$`Ly=$ClN5`mw^7g9xYWd?#Xi5BSyx zh|!YVG!+-byrvrVJ~f??(EUv_E34`myaDP}KQu`tHU!&}=Ih~^e4?I*={6gu5fxoN zR;?+yEuWkGE@X%7=hB(dBdNUysFKCD`8Z>ZW?iQz9+R9R7?)Qa0bd)z$;iNq^?1qG&hOj2KWE?R^*EQiQCVbb#b688Oc{#q4!}W=^ZUllO{qa zux06pmtL@o47gc2?j_n$Ck>>@x@zS=f7!)eUkBK$vlG~Z5Rp2RX?!S2&d{sLSYYtF z*Oz}j!Y)PKz;1E9K&4Bi*JE)$(tgacIvt@{YhCOb!pf$x?djY4Kq%K#vC=+ziPG8D z7R?g{4S&kId^I^jn5*|^!AJIiVr+F5$epi!rw(-=z+7Aby%UhJsKu@5I5m1>9gv&V z5f56^w9ztV6XggQAX$m}hh%7m{0iSBxQMvZ{*PTPrNEIW`(Wb>=AG#h!^Cv^2 zIX}IEaM!D!7Ar}~5kJ$}36R*e6_DEmes%g>G9}^kpSTy>79^_8P>jgWZ#NnB9kvT% zb-finIQRLk60?g(#4aAU{d&(anY&y`X6|&W<||1sZHxkR*mIA0oYFn(Z#6duJ^PY(1xD(qLK3?6liD4zAJP zK`MAQv^(kj?<>ZZ?=DDL(usYM1_X>%{lw9-|25)0LS6P{>?TtQ{@~8NmpL@)^>!y=fNz9=wNe#a5OHz!HSWfp3A{k;v%Y|I48eLvM zTw4`Y;112x^53lv*eCQ38>zI*LuW_{heGQH*R1%F_M2A!jc6Q(w*9u642@tnsjz!# zg%aCuGWHUKD80fB`~}T1!LkA=)>xnJqQ{^D6X2s}+x6LsZ@YZ*GJxs8BzqVWH$Dg{mspSY zerw9#_=4*v79>QHL3dL6P%Et094<++fXE@W)zc9ngTW~Vm?lY3yg`DPW04>J zi&wz8Z@j`3s^qsnqy&c#k_^cIvj8%_t$SmXw#MDP_?x>YPjpMXxQpZ*SXcUqVt5MIylK8>0>SUa_KZ9yWkTpp-o?c#3AAFH%HKRGBb6;WYEWW<%%*;Tn zU1&P*^{{aSnUQCL%}j&)?g|`|Ov9G7F~}QkrTz0AHI>OL2$BJ>oydI^v&o)7svYsq z0FE$i2Por@@nCH)jVSDdjHihyv~T6~$M@bWR9JIQUN)XQ)Vl~K>IJV_kWxC>xU=nt zB`fbI9s2O(>Jh?V`F?AR+y+bHJZYBGG-%g~<&3;I6dk6xyF_o7)JOl=l|1d#jQul& z%Fpsn#;H;Ob<$KvqdkO4o9;p#*Wj}*Zv(sZM;Z*i(3bbdMUPd!hka#X8P0ra4tT9L 
z9iRtp&y>-P1yLb#CDo5J7uab^8CsnzJ^f92!y|xtdZHT$f`q~x4Mf!V1?MOz_BRCv z^kO2psWooDfHa_kBQW-qwmq3hzMZU7YqAbpbZWY$ZxNJC&Dlxvgv>m=R(2UY*4%!VS?hRWdEI-XJBer zT7eaah#<0)?s&1TStTbx)dqdcz>)uQ{fVyD<$~Av+m!dj?FyXqg6xax(4kn%#^q9y z;$K+Po6K*R3qg+F2CgB2Ev*#2o`rN<2hsHF_0=(=O|;%AB5`z>NlzYz3}-q1Ad(V+ z+BD7XrO`e=aXXA|51|FmU+$2jJ#q<;!fJ75s*6r`oDQQyffFIJNk-yJ&FiR1BAm~w zJ_avSQ%}Em|LPD5{?T0W)qvn-mBYom1c>NDn*t8UU+Gu#QMXf|h{vfZ5$iAoYY0Ey zY3!QeIT?v{+zUp=6YfU}Rwf#-VYR+xVW6mO7@E3fg==CLC!Q9G)+gO1{O@^(Fr zdAFz5KN8kgMZPr0afuFD%6SDSCwn|_RW1oAvt>rUP#@fJv&i_`A>fAbiQzSCi->Cd z8^x%;x3FRokB1uQ$Es1x?YOglj<(n(7BD}V?qm4uB2Q4OsYw0ZDG6R$S7R))5!jeh z6~>?`6dcR}LyA8RR&JNwh(#!}_qXrugpyy&SOgDZ8qA%qG(D{Xl-3+gEO>Qj{(RGY zN}X&nfJda)Jo{JWTc%gN;129lWNx3pGe4KnZwCVFETZi>7#arCq%yaADH2Zwbx!$I z>b>xs4X-TUg_=HqvD7zf?KQDrregW_*qs&0_(pcoVcN z3UIDGTg2(2XD6H=F4%YX-m=Lkl_8=3Bf{4xd687;A31;r>G+QV#LUk2-w~y<#R*$j zOL}qdh5JE2h+h#Dwb&wcpVxd19E-@&PSEQq{xBLCrCrOEU+CPzKqsIk0L8Ol{JX$j ze$eIZcM_qH7iue{V8ESKY^s(6{}*qlD%104c_D|>rlIizL0k8P?n?MojY^K}O`tJ= z^<^lW#p~(3w{?T)rK!O#=oh8MVhdMnU1zP6&zt)SH{!E3<>dlU=ykfhh@ei3DL9C+ zU75hCD3G+C^#!g-WquEHyzpn(8y+S;Oh9R79w<%v(l9Gu`P2+0EAUI&mBlNaT>Z&{ z=OSdF=n+e}N=oJ2sS4#uvGI@@Lf;ce1X+`+08z9Qlpmj7fmu}qsa@gJ)|TNR#0-TR zNaL5@buBL`(7@-=tbD;I+^t~elgGXjU6yTb0g}lXDv6$20QF`WPLFm^81n~SiLXF%hAS@ z2o;$FXlG&W`Al7Atf}z}Ev4tXmNIKr<+XPH-^=uW@*C-N65%v<$n@;1DGW5lJ+4UO zH#P`cFx8lejk?g%3|Z+!xszuiGyCh9ltpyWcuGjMw##qf&x!po8-M%;-*!=68&}^2 zBRtgk6*b<^@^6jo!=_qAD`RZl>c5*Lz-Sip%exkgC4r7&n&Gr-qaIK2>mbj2b+CO#5GG9Zf_u`b zmdW)~Vao=tIA45kEcZWfCLbjeqms|u6T)PJ=4l(XiCwBa@vT%*%NFh%QUPhUqizB{ zVf~3EI`A0A6>;dG%f^H4>=O!qANB%75DhCb{zLJ@yLwEB$5dTDj|6e5;f;K z3dRhabjMK_J=PWS&H~Wav4ZF6TpYxb*ZJP@CCUq@OOXYsDO=p1hF&XU*&dLGM1~S} zTxLhz$NjNv>{$>ey|d2Gkw5-hhO@d=k62N2M~06H<{mA5dF_tR19t_~>_RnJqmrAC zUq$P1B4MNawX@35#AbxPi3Lcl_dvDpL`p(T_*sAB1v3w4vgZQ;1qCjM9^>y~95;*` z6NaF#Fa$30zAoB|2mFZfQ6B{LCB`nDjaGpZcW&~!tlr$W&$$Qk&(hyS00Kjgy$VOf z_K7`@JyB?kr3PgJ4pwK+!|t?Q;RZ%f0RU|d+h=oTN|MnSfm&`J1Dm~GiY7(oWFedx 
ze(r)4!G3Wr7H+@a>!+ne#Pkm)j!E^uNT%4G`~JN6`yl-<(I=8ZJ9U-1K+KRH9&Dwo zdY03!Gu8QdFZXj%o$deWCv9p{6{1LTMJ6&=5Z-Uv7C@tFR}iM6;Lxwo$oNc7z7BF^ zLSM)$zphy$<>nxb$Qk_lt8I`{l0%nU))CmP2>j9L+HBwm`xU7<29 z<#4brnrQU@c6Gr9J&%PkYcD2V^P8Jq^aTzNJ=@0r5`YBj@{vUWL}81%N?IQ!5fq6hF)$FYJ5z0e4_VS@nN>Sb4Lb-KzCi z>9EU^Ze(BtU#Q$(CnL?=egm(>*%odxgVZL$IVbygBywxB ziLYmD@>V^4hV5(BLX&wK13y5*a8e%1_&YFPl`%d?Y_KckOfju%V?BP(48Bc^3nV$T z($@ej=<;R&KUvGYDUT!Z7zi^qdae(zOgB)eAa0$mnf+}|z^+XYnL z9m#y-WUM-Cl;QcN>x;;!852B*LNehm?eaQUOg_=n(!2f9qIjxUAEVrsmKc@ow9iAK zh8Pt~UMx!Z%+DQVP^j^QC_@~xhyPc^PwLxhTqSW-#BU=>_JZG}SZ8~EF>gaHkvCN$*z zO$&(&jPOmR3%5#RzK}CRM6sQQWG%tL{XV3#eSp7?d8I zb*J^UcIRWGDAKTS)4Jxi4YJ(Cf~#|Y50?+$=s?gl0m=2MU(#c%q=wls5mCbKFg1++ z7VB|W(zU*B_)6=N0xo$8#E5ahXL_K-Q?nCq_n}AWnOt6q0w>k z1t#&NqSrnEO)1`hbzy8(%S+zaKy>`$PTbQ9Tf1+Z!CEr(NX#rH%<@##$;n{dStc(L zX+SWT28elSH0`ydvH20lcgEtUY7|nhi3MUX&?y}0P&_&$d8?s+^x{~#V$Xc^MY8LR zoCQJ^zv#QtZxE?O3Q>s5Ok!OyRS=hnmq%O95`M%xIY1R);R^4FRUBL>XA7k>p7VLe zfp=bR*Y)+ayK$6Q+$RFF&U{U9)L4{)4(V%Rr|BaX1~+F-@P-LIDk7v(8YttX2wt_J zylUE)Dr~Rn)`-IJ=h6=7Il9h3QpbliW?15H4>vMA*gP2NOEU^r9e$!l%wql%igg}O+d6^ z{z{|5TKH!$ibj!qdGVIyK)`0_NeK-`c+|U&C*(Em)m>hBxqvej$sgj*t0>@~2UtLh zta-9nR{V`9*=NLeiXz2}I=0oEa&v(f(;8Q0>_(6sKNLM^I|?xR!S#4>Me>?vzKt~7 z?(i`K;qrTPaWB1U1CJ#b z{;_xdCF$^|0bz-m_EhDE>gB;w;FcWG)NRo^7@sha305=0;3u^9=9BlwXh>~?e95GrTlrls%= zzUQ}N&SaIPX>(nr%wWgfd^DatzFWgyGSz6bO<{u7g)n#>P51g9@pkKWbvm>M)LbJb z3+EqX7;P7sb>~0*XDlhsd7Onr_(Q2DWhJj=Ee|-zM)_li*|F74Vmgz#toEFI7YLH^ z2}pRR2v7Ybd{Gy~rztGYJyt2rAXJO!W>GG$Hs;+9NtWx~_xQot7x~s)y zs_-%+OLKhFHAmoNO$&^up5sJIbv(i%YVM0LD4}tp#ogu9q4uD3L7uXdJc8VeeV(W-XP%P-Q2%fFIMz(bVm%GIFn#*_huw+gvQwIEzr_G^CwVdiv4#MK!U>%DfRago>p_ z?dMb2UO@tN7gt32>XewBSiOxbI{e<7QkM6u$52A+1szfEQ_YqE5lt2KKzAmpAGWn` zbxwffxBH$x|FN?^J7g>!VvNmOhvxxUZck)!xq(UtQHGZE8~7 z@{SwHT&qk4Zg-ChwYho;qGar6nKqY(czq#GMA%RlX`5gmf8LM#g3knN@{oiRtU1l_ zQxorxd6&mU9B|`&Xc;zZcBsdtNV5CMo$}V5@8Z(dqGVs%BO=^~JwobUny13x<@->J zSY1Isa*l)1t>GogX8%;nkn2aF_})kF;Go8&((g~w!#^MP82bG-Vs0k+l(%5VO5&X? 
z<~{wwXJV7lvg3o=4(F+YB#n~J*m%zlm8kWT3B=iwM8-AADZa{jiK+Eb4C-e`(l4t{ zg^k$PxKWVwx)~j$>TcFPI|tUI+C4)kG*CNuG`_w}3C^qY;CHtJ$rL4eB8+Cr!Swym zAJMN*@VSV<@l$X-NT>`t3fP6yqVQD;Z6#qrFxeI&x&wX3wNr|ihke9=Tso?qzD^F0 zPZHwd!@5-P*kQ-A?(0lPhTq;cQ8w!LYIJ^_LP6p-NfK}!Jx+IaWApox5ndY#Zyd`l zmG2^Mk}-Q&(NLt<`r%KN(eRX_l5#kW<LM>2+U%Jv0rrk{8J%rATbn_uhw_u8wcc z(rKDo{TM8hWe(w$1b(^sgU5n9aG4|Q_6_H5at(ske`M;4@lM-BN??SU%)az6fdxku z6ehKpTqf$itTpO0;&bJ3-ebQX5EEC!2)Vo`KDPcUj(rrGR4e``OXyX-&hvmvCM+hG z_;%w@_Cd!Un|F7OF87Z&wPm)ZCO2#=15~>x2Wm@+Cq>6j@GmD@TfA-#WD|5Xv4{O_ zcCO>Kz9uiiO};-}C>=H3MuQ=Q6bz8F(uaQscrm_%v{Yhj5dKX3V*h7INl(R{08E{NS~>K?6o0xCC(yqjBKf(m{8Jz#|W*3*@E! zOy4oXm>qJ?M`EW~ZQq(K#~oGP3`bpsJPoXcZ=+=R`AgYhCS<4UQ(G+ZBgnLMyW86f zpEfrqUg{-9-$0+qg&twOg$?^Wmgd`p(OJfpq?YVIJG*oHik@f_>~UpCw&&Vj1;YV$ zqq(T_FWyKEHA=YOaW!E3HU1$kGJsRZ#%G%-5abAIO!<}W`xU;(u;b&fuH^d6c%PEi zD6?lQ<}_1s(32D<_$XNn4xIjVIQ-BgPdr79R8$Q^hbMx#Y_gMAS4RdKsg`mPf$DmT zs#5jO?A*>jf9C8rY~IFFb10ae*HRIDO!eXochpWG+nX5vK#i{*oGeI@tZwGG?_dTm@Jcx{`ahA>lMpH zMZ1_fS+2_P;1XP1gO4A25%6)5kLY5EP8S>aw<>mxHJK7t7$;kA%nLsMYDNEiuSgjE zCyYym>Q^LJ#3zrna(xruwWrQpmg94zdFKI0BUxCzm=V8kuR;o;xMoC>!I$4`MSN74 zIL&Ynl&81aS;;Un*yA_gl_RZOoKD^mnY#t^uHcwBqNk<5XI+P@G8_We?*0neK0d?( zCsW2gohSW8kCOQduF!Ez=Vx6&t|1&aEAB5ci=T;(qiY9|wjy&H<^_TwaeLDydB&hN zajlSb_3qxum{D@{3v>b&08TB2XL=;cw^_3fhu~9-AIHMMy26QY>L_c|oTJbByuPYW ze*BstxiiyVC1<#FR6uXk`rY9Ak%2XL~o2R~VDN@G$J_xg<2{szC47G`h|{C=S0`I75_^!;39-tR;m z&Lqrc(i<@WZAx0b>Syg`ZW&AIfH}l?aTf+|Gd~x|ne2Y_f8Q)Nr`tg;5q$L<8cp#w zirK5{PsIxSdUa|efS9BKV$wF2tp)!X#cGB^%K()%{8y)D$K!LM-S-h9Ca{8@Pj-P8 z0Ij(r|#?)+7QtC9F&ngdmSahiTfHy?K{Nv z-HUhvW}O%;hVSx;zsg=Y*tg45vbJb`G#Q6%Ye8(bwAPNAbFo*pOM(w%NNXKM47aMb z_fAI*xtqi|L9=Ua{M`pNTVeo-?-s^3SN??0;MYZn$>hmRwbEuPIW_yfxM4~7L$;Jl zdiQDUdfizofl|mXR6fu?*{>_eR7h0orPkUqsYHX7w+tkH+;DNnCeQES9uW0BIJ=vDMARCX)Qb(bY}RH@ z-Mg*z&RraRHkEJSVr(eGxR~WR`v!fwJ-a#nA6H))P*vA$O{a7mx~1b#(%s!%hb{$_ z29@p(;Yc^qASK;McSuW@fRr?R8=v=i@4eq2fA|B~d#yRa&~@hJX0t`wUrSI|HIafzTFf%oj6`&)+(p`H{sSG 
z`s2($aVu(AV_dqdIfD~R_T6`vKFYoyH=@jy55dt25`hX4lidA|L;=L)gkY&n7=VG_ z0Svq;CBY6YI=J853KLg`b~%`nJ+3H-6PGf2vXDs6M5N|dmp`Y`>WACG27k`a$$qUt zoK7P71krE3y^t-Jg;r{OrY9TczY4-zhi<46v)9jv?rdraAXyv6>!n!~I2dEm`aW@x5GTz(MG1@JSYoU78@snEm1Q45a3Xq%m^3x^cf?QhPL}J~ z9$%+%?lBVK1Gljt2GL;Y^-=at&z|21vr7)iYJq>?Gx+gj-2IsgPuKuuT*9+>t&ha8Pr_dR-LKeCmb? zHlY}U1s=-C?V{*bp%G^W6Xx#@NnJL5m@&RaOlg`p-DiwMjH0S3p!&R$Ox!bW#$40l z^OBY|A1}~)7R)-0LhRz)tM!^VqNr1T7EH@ch#&J)Ao#5}kv|w$tvkN!(C1Z_)0Z4^ zXhL|$M;_As>}y2}iK1VWdbx2ei5bD7>~SX{qQ|9KFf%?e-I~p4exVy8vQL#{Orv{$yt&`e|F+tDob0s^W^7yDB zjXTx}CWIdV)PG*HFjs$r4558O-=j#Q4ET{;{;nCLm0zjLVv`)OwJQ%uP@t}uByK+* z@Wyr;Gk8?$^mHoI7Tz*Om(M5$&l}ba%o5FG2D`4+@gy zTNx}eWX`=uDMC1+&3>W&Z^9WtoW~hc!bdXGD)9Sc(ZrF+mR~|W+cQ+xiK6XA{+4lx zxKF=VOuqMKc(K0$uOK)|7QSdWzYRuf177))r(nZOrlDlQ@zZ@%9~lT@W6fjPM0)a{ zA%iR~_Hb_Aqin0l%inv_>5Ey{->pXnO=cE>nr@c|r?73J7^T7R5d@S1 za`d#h=CXf|&3Mva^9SJZIgw7gh~8*;?XRW4>aLhY=Ccpf$wx0(P0AiT_IV`<6MFMA z^iySxog!>&rEjFIcI+PoYn)x59dhN9mRte%i%LRp=u&3r+032l1C5-+{DNh}3jO2& zu*q-g#NGYVWhUfJX((yx3_%*!Ko;?{&?IDB!bN3V@jwRfh%d)&4Y(A#uh$;eG~oSo zy81h_5OUEt4kX};+{*{k99r!32&eRm80Lbo`C+oYAjXXSQl0#ud0s&;6N(Oeua}}f zXO*a$NzbNYW;BCWj^x`0(<%@8h28^uIiO0Yc=wmuB2~5WZP3X?`t?C6@$j#-fM!pk z_q8x1mACa{Ewju{ZG$GM7O9=f<_0`8-HeCKYuKZRDg1mbrA6N>7hBqbUo{`tF};zC zLr88Hw8BW~5Gc+&89#;IIwM9V1VI+sbWuw!QL-e}?e0N?-^VcS!IY zZFbVhhp)Bcfi$G*q^zl8I|YP;8PR1TbHIpK(BC9Ks+HXtCdCU5 z@Ia24CAE`Y!JOz^&Kjz+5#=MgJV}`Q+!;b?Y6{-A4L;zInD{22px8y2vO6!r@7i2U z8K58cI6HN2_#&;lrbQ7B@U|#;@vSBkcL*S_uLxAUv~L6@<-rmLQ%1^Oj|-vr=HiHv zb~jQ;UE^C_E>en=9R)Q%2O|zd9iT7xXF3TRR0o4R;J=NWk}itCb}z6bP0q z_qVI6qn!{_bn<(?+V%4J|J_)c8xe){)lImG7;cC1JZP4qzd|ttZ3sv4l=!mzQ2xV5 zrki6MRvi=RGXJUdbM>ZTVgw^T2{2*=gz0v_Ih#^m#8m0_aywuKjLNu^P)nW2 zJ};>x8mwJuyxwVn6@xEqLW0^$6INOoJd-7qei}RuMx3&KomHrDeNY)-{%;)MPXh`9APr0!`!k)w(o^d)G}R{>iolLc zY6@m06(7U6;>~sBIW}KjJU1ACbNgP+NMICxNK-GhfpOhCf*c@8kXMNc?xY#+W3>k_ z;L}01bDjaL)V9A0cBFJNz#XJ5`0XR-_Su3fwOYSwQ2_YX7{q<9LieF4A83UeC#Z9CodAa@~k0kQ%`> 
zRgN^Usu#K4xos~0^Pp2u6n@v$>3Q@w-i#CL{1yx%N2lUb4z+WyN6rc`qcu8$CjNelNTa#c~k0my#YRa3^1u z&BUszVW9@I=ojJs)XuBg6x6Nxvnv{_0dBG$$A4_N_T`123Wc0@tX|J#Is z<7k*Z|7k<0MIO)taQv&2odOYvlT7Ovv&=`-G=p_Do)sS-ImIntq~ci-x+$`MpJ{?3 z;`6*^N-g=J5@OFEUIpGa)NGge*uB!?Lxrw2B z8R>oJa)Yf|OOS0#^&`RCxr*ZFZTmc~wKT?3*;CdqmpUMU!&VWr(lBk_b3%4(&av)b zxDjD@HV-qfantb(&FZ~^pXH`6-^IB5JBXwrOc2v69nEwGrUE)y zbQ5#(esERRS?rc!g>cce(`xYTD=eyqv_) z|Jn!H^1wl@mj$Tf2|Z~*gDi+Y?n>?@#$!?TH{dY@6v%XR;7yMLt1-SaWfPlzOgG1H zhTBOdbKl}kVxNjQ0C^S&0dF?`Sdy-L%S+0&(9`I8!?=w~R} zPyr9#iHh45Zmi1`U^2@gApY|^hoV=*L0nw8DzOQ9>$3Egk+MBRseI81*N|xXkoufq zcxK*<9;n7fYg$;X2QUUkzx}HgKmW$#_aZBGcZI%VzIwt$`SbI6aomblpBi=vqNs2R zQ1)>y4rEaJqw!G)G33B8@#g@^F9AqCV>CVnAD8_G3GopsD?1iE0$~J$3XgSV3zye! zLGB#$$E&y#xi)Psg0iP+<8V4f!>4mA-wMJeg4!tuEGMrzD`bqM-4 z?4mVpoBL$?%|JN|(UFKf)ZCAF%k>kil7C0%g zbcdqU4hC8u-1m8L%i{wys~ezMnY=3%EwW1wp6MG_BPxD^yJ2-(noLEjRlpa>CV>S@ zByDVa+E=(dcnQpWgd=%4PGwg>N(1N|8R@>mZV4Fk*Pa4-K<6m@tm=KqFkxfS{ylI* z5xL?i`LF0bI);Yx#?~pYtB5~$_E93cH0~p6UB!1aq&9J_%k+qG?T6i6`xfA`D9xN1 znnJNuMX99<=93)pNf)#N9Ng_J+m|}?m`}6M8G8fh%U3Uihk0uTxdG*J7J8mu_It$SgJW2h4Ya<%bV=EAtccHEmaGm1 zcaW0Dkv!CfN#3f1kbYd#gOQ=qR5CknlID-6MyH*2P?2BdGmX;OW0unK`7yK*kw? 
zt+nHSmfmB|SP8+3KlX%gCuE` zy->{~Wut&z1MI3abk>(y`d?EGIN-3qv6SsSmx+GEL4swoEI0YB}IT6&Rf?nO3-w}?@msXEGBf& zf)v<&OIWxwq2&w!19*WgyAcFFFlhdvvS1(ug$;Ak7)3YTdq}Uo#R7B{0KT8`g6K*$ zs>n0O-lzRbS^%e%vu$)1`NW$8-?bBu)dQ^di$?%2@L!5pwl3mW1{VGQz)lG{n8D`b zU;b*9e>J|rp5DA)c9jYH6D4j9{h)+tS)TC0h0{{TNib>AP?+w({+(WxZLwtsf&ow!A(o5o$seBg+ zUQ=?uzc00yb`|ChP%ns;Ud7q??a@bGOVGJ2jTUGnggO5PJ`^1S043AAdXGzt?iYOu zL=PyLS?Sq!_$b^TmAzh=YXVk-_eJpO)J~o`APN9VZQobB;3dd2DU=N81!UdD{AsI^ zO>MQ#cU4DcKPfWBSn?KbR=kjlfQ{Jcwp(X^SGn*}RZ)8v#+8u*vU;iZeMtQR=U?uS zj3NR})Q?XY4nC`?vgu2g}we14v`HD0b8K)3npHX>UlTOBGhTH0MA z^hbaCysJimAI#b0z)if_-?HAly-pVWcDGJWV(Ey76JLt;pQ37>i#6H0V@M2h_1njMc266}0BlWaHqD6$7?tMZ{9S}%4-bd?ozH^% znh@hVpTb4`qgU#|Mj8R%UsQ*eoRrVN$Bq=e>zh~()^7uB01BV*$o*}tmkG*JLl zn&BRuz0JylEN%?1=27l`pFQpi`VVda=pfGH>}!j?BqjOmhL?l|R0)MB`}IHwl`;`UOIBZsKZ-y=C7#2A-fp%pT?O6WF@!=nz*wn2WT^qz%o>~YHGq>e*lj@do zbLF>{=FvbQ6K#$$txReLQ>e$eJ!We;GeooRY)h;GCjic^H}c9-ZdXnH7NJsy-o()Y1tU1 zs7U^1fcxyH&;@<_)hYW0rpvXAI8iAz6;%P%-9e+^mN&CbXl11lP(Ln!Q0V=Z5?lC` z&^&LDO8J>fP<@k?1GH~g6xve>qLstFD zODo>w(#Rwa@kG!=js>`SAyf)j4}0Hsc?%|M%nw9NBu-ZSL3ucqzn}W}q^G+;8B2Hw zqYj4^cq_hZeL)E|hc~;238#l0G$>NpuNoNy?{}C{0f>FBr+0OUj$!yM4h%tJQg3(T zoU-Od&2`YH>YOCg@aeMF4&rx9w26IczLFr0=dF8%CY;X;B19zEe9)3)AU5ua`2`%j zQHf_dvlC1}I+aK(_8`bwO!w8G8?Y-rR7_GKgqtiNa?Vpu2y@Lt-Pq3Ue zW)0sy;4o7EP>f9!vp-EOEEH{GwB>Kwn_E^bU38rGxwX!j2igJd74BKqQ=%7x&>pykl_b`h zg!=qfjF&^1DxnNc@bmI}e9gmI(JnE(3AGbM0n!Zx?To1tDj(`R3fY4fwC*zUgTbG1 z)Vl9?G7@XoGJ?RzWDJFFoJ8Yqbaqi@rQX^`CvvOsQRWJsy(XIWd10v;5BaEaJpR(% z>^KeXpQ-HKXclMIsKew-L9D-67tKV1VQIYj7q`&R?Lc5>|XQF;w>A{?zG1dCN zox0EFKDN4G_J4B~mUNrW8{oo&1yVd+(6WX#vgn8j3`@f}b7dBY3nl$x*Mnn&AA8gN z*HWZTnS8`=c07P6u^1mubqZU3jN!d<>AY|V|4^2 zM>()~rg0*4+A*I1|2HR`GaRemB`Ex?j(sI{14=nGWdLZ=j?HG#e8Bwfq8cqBoT

    p5MljN@&Eo0j`*oi{7#U94$hy-*Id1O9h&;l_7o(wKeX~@J ziScX3A!5e|CPs_A-@mcOG5>t+BlN-4$3f}T{$rkzWcHjnVunooKqBSUfDc?H`tN*B z@dmrPfo9pT@?lb|m!k38o#E__(`T=@i3Vbqjpr?Np0Dj~$D-)FBXjpZ1FiW_eP}s! zstc=(Qk(aGx2MAB?tDB=&ln^r4qSho%p}j)K}eOVY04;6Qh|^x)03-AYxP~kOz~r- z7E#&rXl_I25);e8?tD#i+67vnshG4}VSqFH14pMPPNi&flmz}C%(Nd<^JF|+O#OmL zJu;CV?7bhL(9$aUFR!9YF*}S?Dq*iY+`ep{_%WS8_)nUK2c%&FFe}V}eUVT;wE|t; zfNlQ$w|9e$!(Ciz0m}cY?(449ryHB4IWIcj?UCy{)J%A&AYDU%$4IFV|I}rZSN4J( zALE1Br{2Ut+h7$qpb@-JlHTfX!x?Yq^_Sj`{`)xMj3hX$3K+%|M_3VbS#(oJv*YQi zO>8$LbR^z4cXE7Rt{K{K6|IrPn6@k3N(zTJeRGprO10vc!EOPjXXL6FeVJO$A+D=2 zi()G8?L7OWZ4=k9UCf)5G~${2Vb)Il0)}mD^sD+X=$2u_st?gl zPRV_j)YD+v)6Z3Pw#WIezxPWb_1i&rFJSnEZNL`MV`T(f&QExTIe5MJt3GM_fZK3! zp^gnM0CR)NaI41vfruUn&OAyH5%ixriYrtTI-%JnOu zoug9nTRl^wGLJo|(Ym}(hF;V2J=rA#Xcb1lWgABYY!^*)}rJ{H0>cR z`!_Up!RJK^lRPUE%3bU}!LE$&j3ZuPp<|tXv*cd`3I&i2^i4T@y0!0W1zE2mg?1A< zxejL*I99v_k^glGR?m`-FJ@r#?Os-sMIxt=aJ*@*==yp|#A*bKIt8Ajm&cC~X*Uf) zTSGbcc?fi>cYBUZUTh&*m^M{p?=Tu`K=#`#&154TRFFWa>sOnwSg-bR%U&TbfnZyH ztX5{=U@d!nrv{$FWgv5!tbFyIy80eJ4?aWK#g|CagUoCq?g-?H?=Fmp5ib!T;~);s zzSL`h-+e&eHOcLn+CF`NFrD_E-HV!`9x0hhy{4?Uy`o!TM{fu!Bt7{@PV5s{;r-%^ z`|T``E6WRvkD;_0oSuy|TwWk%0?(?2t-soy@9YK~Bz)gC zY-jo{rtJh25-i;$o`6iZfxa;aIx?KRQ6%3{k?#dDY5Ixyaox>a*9_{m<8SU~^v3}< zM=z*%y6OC(Af2_IE^RD=BYk5lYW2&O>Ffg@way1Kpa_0Eo5T%{caNiGa+eSCOFyIj zF3Ky=zY54`;b&a7BJOp<6?E3CJ6b2l*!DpZ089R7S=^G@n& zLvD^(=V^fDKJsBzdRsD^N0Qq)1t@QI5`93xEzQP+t}E0rBfE)MJVbmJB~6OxDYO%z z`{y#*HE+b@1ozULUCP<9Lk4G+qI27qbqsNU@YZLxwN4iNcK5=E6!hXi-%JLs9Z0!x zG)L$;R{D;MWm+iWw%1`gmOrZz1mAmcgR--4#cq|V?1QbTU{fk5Ee&`!NIRzh`vX{} zfWjTFEa%Zi&rCm#gR^?i>Jdddi~W8ffIU516P~*^bSr-G#U@<<&a;{w+;y@#U z$Hd;mcRAlR6D^nLg(a;4?NPRiD<-)+54(RD35P!I1#P89^hiR79nXKQ^lFTa+{|+} z^Zub!u0AL<0DM#mRF&oHE`h3C-n0&F^_l^LQ^^mvi3u zJVOIE2uQ@d)gLFt9G}%B|NB=v9{uLlF3k=2;o?#{k*%-gCY)z+Q#+?gS3~~RDI&G! 
zEvJ>|&klu>=W&tF;*}AIrq3B+IG3+RoAOVAUnasHITG-_<%pNtGI50PxWm!}c^E3?Bv+R~wp@vKuilk!vonkuX7QNC-J`_Z`F_NZTm%m(2D zr|Wzz)YhZ`qWg7l;L6DO*c54B=KFwsh|=U<`bbB9N%=5euXFC8BIjAV*!a7whC?KjgX($b5)~dnDe4I}j|E@rO^y+cFUgb`@RhMp=C%hR z(a7_%!k5dvK+e)h1&oi%*CvA7K8~d_rN@S0J$2ytLnhuOk@G4ep_gXbN31!EfIEg_ zEN`+C&|7GKCSUs}VT3i~8x+eNj3bXF^mn-L4R#7||3=Sv(~XsJtcY9;OawpO^u$h{ z81DvX7dr(mJoqo|G7>93=Wl&xi+@8t*V%xAYjCzpVW2iKs{s!ZEOo@U0;b@Zo8yR@ zXoOl|E_|;y6paLgW#~x034;Sfan)|Y*@aRqM?T^BjW^};zp0g5!ePvpy||Gz|JnR? zd*xo<4t&wCBZVw4E~?Jfg0^w2fm{yf-&_vd3U@!ZThpQylT1$+KtDX%aMZ}H>GH?` z_UUBB)zAA5A@INQ&IN9XZWfO@1u+t=t<|SAgqowCgMLos&g>B}TZf;OZuf7!zoGdp z>x|C*e&^v0r{a_(TV$zOCYs*|5;F`qwpAlB3 zOBxMu@e$#fx`|e@_3-d;Z151*czy7fFo2-idpvV&%J!9uv$`x&T=Y_Snvo(eY*`R& z-nS3Bz(jSc{0>M*02vZ}pEuO3mIHu{3B-I!fW#bN_9_8puYJy}wT7BC{G=&FfDqd) z*TxGKdIVgO`mf=;KbCm{YWs0#X#4uF{N@nM1>Q`bz4MKmy#oQEDfcN)We#72owRw8 z$Y+IMAjFq8>-Dg_)jhcAKl%<1O#A^~@)*Hgs(FZzNweMW`}rD@o^pmUZ)Nr4Ep$)tsMzMRzH`PO5N zXVVLCf43)1pN-$05FFhyi3L-)ryA|w_x6<)K!U^u$i)BGegwM@8e|Xt<34tyT|e4e zc(cEMe~a|jwC3OUwJGuGZ4%s=;Es|-FLDH&Yap)#ewhXNiYM+VWaq&t3_GH6vh0`m z?qY=6@xP}bYWY1yS%zl_*WF^X1n{xI^$#zHn42vR=}{Wsj@~S_H!dyxVZ0#5!n(H5U}ef_TvM64=b2=MUl;ll=VHVR-T{fc!vC0JGTA(w>FsE?eUI z%>*_uaA&O9mpC=zCmVzSslur9&eH0cg&Bd zHafxf08(q+$CVAOh6{_#p>>tb-*Xt30!iF=c{Kr4FQyBNvQKk}mS z6)xP0w+%^Ne|6_)>L=}gcAlOCKK971V@!^?!kKsgsSV;>)FOyTv7@PI|7L^`LG?*4 zE@%GL!G$?K0-#btChdUq9}g~yqA;n#xc2_6L|bxl3_@VOzwgu5_EO|Qdbz|zj3>&o zhbG~v8kg@f*2;GLst;bG4Z(20m$zS zlb3#zTYoi`Ki>0pp%G^ofY;g8)b>paH;47CkbL9R)qLcOPn{f1YcQt)4)76A&Q;F- zOsrjq@>|vOpNl6WLOulOxA*G8M6Q$6U7wM*`nmrdsx|X~;}>se!Ioga>zoV(s_{S3 z3n1b;m>;{^;3}j^cPsMIbp0j7Muz9V2vf^n?53(_QfG_)9QU&!`QXG5O7^b|4*c0r z(?jxYfhqh<)$1N7HZ?XFA#1h+P+6j}|zJqtL z{caz>~AUzJmw zp3nZvL**Y?|3JdW=m%=b~kKkBKFNQ$Ct=v|?8W zP@Gl(G6FA~fXsOC7|vih({iCSzW5S98s~c3HxRnmXxM8m`vb(?d)9bd24-2XDwJnd z(Tyr#6`L6}mC1j@HNW?76grN68p9~fXNJQeZKR{vo(mL4oHjDL$A-4@V$?Kj;ZyD4 zK!p)=hcHvea$4C`Sty?Y3Dd&6uEMQuL$ZvIAH&hupND$p7TLN~q_1a)rw^Th&b}07 
zNtn%Wr!^Ge!Gj&pfI)Ul?#>Mxw>L#LMt&h7t#x62+-6;0V;`#W*4a|3(E5qm6dmVQcN_wWX|7BL1MfyNND>7luMQD+1tS_3%}@ z0cQ(Rj?-h=OIQwH?&L>(FrN0W+dqPzQW7%1_`P^vz7X=5lWnwzm}a%C?3NT=m6)HR zs>h|?mfzyhoHv>-?wxT( zLdCD(zgX0zllpZRzME6+`lc^$vLVm0cRZiKgiI_>H~HF5#^h}gq>4+JEH}G`cNqvA?Wz=#88d0i2m zo!Anqa<>4EkBl0I5CfSn|JS_Lsg9-un@r^kW}w~68qA|WA@Nc0^h9*0T~sSwZdzw8 zBQaB055C9Jv95c8y+3PZdsUoIj+bw?S47BkJ?AMeXg6p8)nupsByD~t1uqA% zC6g;BB>?`#o|EkLDs|OqBCmHh{!O1!fKeo^%WjQA_3PcruBjN$e?5P6fs<+5!Gc?) z6seO=&G|H~@z>cfl&nT!1kJty8(lUVcvzxsdmWc+xUTPh5!1e>K0X>0ur)lWf<4b$ zhZZL!eg@ILJeix;1q7PYbR4dc$5bze(vWgHn~w_5S~qr+?1exKF>vJ~Wjtu@PhbRJ z!eD6%C;VCL>p!PSq{~#!>HCtaX76b+oA}=cwe<1f=v|doMB&DJG9GWudi#T>L3a!! zh-VF$QmeW!-eD0QVA{1`>zBd0(lNmpUO)q_a_r@RH3iAx!2zM8RuZcLXlfze&yYL1 zPlt+~^bVoKjpUU9l~Fys6Ks{47bI6kNuLDZ0*}>s+}vF(zc;k>4gk`;>qll zIJJJBV>@%f1V_0pDqK&rxAh4;ivr3Os%A(11JJabvWAzyb-=(;zHM>Tqn8UEyjMyc ze>*=gi10sqH)5b z3k%6=Bmr~C0f($8?(odHL%kR$y%+n5Yy<392Wiqrv-c8p>)Bz~{)&T1?E8llUcE1H zE*4v_TZdDeuF9PKo>m(Df}IU_zje4v=*R~9skSZbyFCSby}dCITmkt#J=_%02R)7R zOsem>=Vk0MUL+7T-wZMBeXQt7l-#NU=y1IEJ8Hb9`4YQ2{C6?+l3&&)Tu^px5)1hx z)oeuw62%{6#60O{`HsG3?ER(;m@q50>0$JA$JpvMUXxZ3G-cf0) zaC(v^IjwT0=qYJP^w;W1P0?D5gvVf@z^ZG4gtXYzxwOb4HxBo}P)lu~Hrkxdd~6;o z3-Ya7J!kL?h5H@P3)6B6XiZ_RFu%t3p+}pQ(_zCr%n|@6_9s}s%$T{Crb7HVX9=S! 
z@n^+Z25OVS3^h-N(fF&D!mS~rC;o7JkYat!y!>HDvHh~4HeZ7$Q z=hl)0;&+MdD?Z9SB>w2;gL|Ec5IzGZhn z!UU=U2g#VMvWw7D9)3+=e>1v%q!5bkMz+ zrEN27O(N+o-@)F88~fqKLX?{av;G_Rgg$A)+vst|omln>gm47$oH?_jNY%lM&Uv`h zv`&~tm%5*Va{AaVnPh|r#tB$1^}D)Di{W>rC^w7Qcor~2gVX^0k7ym5xexx}%8nsG zHnZ&;9{>1a=stO`$&u4UU^x;eY>jk%i9dKlGA{*R`%`|?WbQj2gH~bcXvXsPq0z{P zk8^FPwx5myKEY{3^GILeN$LFo#c30+@%Nk{cxn_f?V*Wev9to8#Vq_x60+=W&wdfG zN^~`FUtRL`HY-mvZos|p+53TJi*o)<-bQ8Ji zD>A!@;&h|1gn7U0uOP(E1~u5uj%2M)LLWwC|H|d?q?m?AWho z5xVH;0`c!f0#`Mwg}*gpz#8xyB4p;H%NK_wh^uc!9}6|0TzNeH$enWiibS@Dq%0QG{i-O z5Mm;8hp3-jQEZBYeAPJ5WF%v^;wWSf$&c_ZMrQPi9+buUd*eK0vfgjyP1I+_&KC2f%Yab8=~ z&s>AtggHU>nI?GPNFqMY&ccPLKVUYei;Y+u@LB5d#3yY=4T z0x7&2Woofqt;)i;n!upX1ibFJ$Ft~ubF>fcL<7_2Zr0u2NU!bwvf+PbK zPS<3g>OBZhLo~g&?Yb^(gXa#?hTyvmhqmGwQb^+FPD$t8N07B2vGX&G2dI(MjYpKU zaBp9+GbHSnwVb+}t+2SeJIhop*Yh}&EB$Pgvuem){Dx`qXfen`c<^B{Ip6bofY)c=FD0Xhl4J6v@rC~m78R_FFB94V35ag zZhn(x?&OxWxK+JplvKWRm3;Ae4uC;Q7fw&|j3T__R1P6^jCRb@rSVwUA&pewIVlVG z-p{UXx~ya>FoZs>*&`_4X&R)L?q1pKM)KUuFn#izYwT7FcBcsWBPMegEmZPL z!V;v#`=|nf&LAtl?n3QbO%OY#a*ujS_ziXFsFr2%s&HkS*;R;-m>YXh3R{YJXxc~K z(r>vpWcP5LUKMNGB`I%N2l=~w(-GH~u=E%G`}Aov7T%YSAQc*lX@}_Vbkto1zmB@_ z`$XtR5TGIXh|>as;7kf2)tSHmjbwVa*i4xQrA+b3?q~huRD2nkkXbtjQwgLxr=@gq z+;!ZQFM>E92^YJ7OsBWqM-D|sq`Dwp6!G-n;FPrHu(>FY%u>)AO%59wTiqCNUh1M6 zG!ZGBJ8P{4s?R+XHI*F8#zPGWcYPNQO9(oj)5v&@SpPbnKA-(qMHl_ps54*?P0o>P zH(XDHT!R9;DzI?C z9H$U5Nd!VS8mqQgl6T(?$Pr%?kn2){cv#yv&qq~hK)>(;-+UM#K$IS_Q^8O zg+xXWlWEi#>)AErXza6I9m$w35td}CHNm`Kbwyn2ohtM!8QISTK1Ja1qSleP(+88o z4zr(J#e?Sh>mkypuH}TXWS)ZGauSkZKkX%5qwv{v?coD=c! zE2%Ddh{Fff_nok{Zz=}-*oD8-g)0|_z4PB6`d~hDMbGyXu>EtXMmuq4*8|CB>@(iy zif+gP?u_?qn#HkA+axa*wj4~~FHAA5M*N2FGxK}q_o-pt%Vb@1`CU)K-4@o<7=Hh? 
z2wWj1=ARU3{mrtY0V%$uLpdE4qKC2S@9W zjEL&C1?iOc+aj@|hglL!9`hJiU^8Z|_Tb}c3K$Qp2I0p7h$;e3=n)qs+bO~u`B?r3 zn-+Eb!FUIMfA}7m&%?WH-(!A6uW)sn5ES`^SVu$oXA1lBGB=2P+H?rf zV(YU0lozbU{=-fJMi~Sgss=hTIO-k88})gnLZy>3t7=XP$N>f5BpBnPLOXOV*n<(0 zWE=SP29W&C!-}Qd4u_2ENjiz&iI>336mJ4Y0Q_X)MKCZ0bO;@=8TQ^u> zXOetjvzu-mU)Y`M}|&L8mb1_bW0jAd6G0&eyz1 zBXo37w|)`mswVmf7!N`iOQ^gHoLul}C3uF{8AHz_B%gZ6fOdTDW6}A0MQZ;Aj{^Vwsq--Z z{R?a~Xk}(Q@1k|-IZ^j6tn2S1$UB?d(675@_b_0i5Lj;DN1K>p6*VTzfA{%X8yWhv z_WNn!=Zex@(akA|o0sysok+g`jYGso24p&ZfO47FrI@5z`*B!RM#8j-G4%SkCP}>* zXdxn>EPczsUQwnwFcVZYJQJ5q&YDb%b%=OiY27z8X;9%Zk*Gc zYL+c8)O+g-Dx;^IuJIP9uf{1~S=6H5*7fZtT`M(E-tje}V6EVng^)F`vsb2Ph?}jr zmy>%Ezi(-Au`SLPByx&wsk>!X;s};br0I~GT0*hj_cB;6bg7dUluILK%%s?XQ%taR zGs(U!|FHCB;ElKd>!rYUg{HWGojJGUT8L-v2~cLNCQU7|*-v}8^};3CtjKGzFCB}{ zGn8On*CTY&(fQ;55%FIYMCpVEz;|}9!xY757vAUNAH-cIA9-%OP;;+75ND?Hi}-gc zsXJADt#{qdcWMyPm5oNY-@R}U#TE$TgI^8d!!Y!2Q^(|ZyA7q)Icm-__6OS4iprLn z;DObg^=Qt{pI7ViUdru?feIpPzR`U6XHa@?(7y%p105_Q+Bp-S%g$=1wnnuj$C1mj zNCK`MheDcvWBvzRQI!!V!jdHB#~&a(;CjMa7;a6b^)-hV&}Wvq3$Rr49Fr15+{eFd zP||U#F{uYISI>IS@;Fw`AB@LGXz-rN0j17Al-f|RNUG7G%EU|$@pu0`VW*hyC%JuT z3W4-c0(3b?ncV>_3>%}Z$KUyIs*<4yF{+ffs|rkX8*gB0UHGBY zTQ{}Vffm*ttJE*4KW%bQi#gudmdsEdx*Hmf>^j`P3^uIgo!7$mOS*Jb2eA9`4F}o2 z$=QP>mhjeHeDTTQ-kZ^AJ8#`%<)o9S(?hoT%bZn(GEt^^j&3c1rjNN{@7_J+WvEh2 zG@;*~5J@H+I=>G)EYpoPfAs;bclPd1QeLm1^B3#|nr>~72FuS#;H6Cnq@8$sOa4HC z(Qx7z<6n@YFCnO*OnP52bEL} zPA7gE0WiQS>sWpm2blIvZH8Zpg~{}*%LRX+LPqcrMxXHSjX3@y?YEEjggXJqGKL1Tl1NajH2u{`z&Y2%$*!OSm1pC!krs_DD13$-8 zzMnQ9ThmJP@Uy=ad-y4L!9ZP8F!QVC!Xl*}wyXy~mQZPTFK=z1+J6sp(|L9);Ky+> zu=s*i?lTK(g|coJSscOwYdFK#O2t(442p3?13eu)>O~&E9sWxPo*eRMJi|=I0v+D( zW)HAUIkQmXcz_LcH-ycxSV)krk#*O(@YgW~HWr@#eYW6$Umn92E*&P zfoVUt&bl#GXWg%xVn!-n0^5~h#`Ng@`&{g8N=T|$M=r$ks&b&cSRRbue1nkRM-=%q z20=f#L2iRNIz({_&u{MHLrS+UC`+5K&k`*xh*;88Ov#K<7ibLP7lqMCrL*q#_AI(66mf<>z${OLfDJ8bR0lU@(t(Vf-J>+JX( zM6X!0i9up{&9m_~KmGyLpwt~5Z~`1s&@Pjr6t1Gor%5zhkiJCZ0N4=|tM21LG3PSI zaBs?L61ybj?fhPOR4xA4X4@4v;wM|gmqcCpC~8{&%1;(HEWUeP(uT_=cFZq>BIlIu+hAAgNQ-S 
zoM2S&Rb(u}Pq4fGD zY|8hh(Ws+d{A7GT$J1v3)+aUpBq?RhUYF{c=eG6KJ%79^+g%i&`S$J1Q|w^6dv{FOIh#wi;Myx&_FFl22;~{668DHm+(bQS}HI z_up>_O6{_%Ixov??Xxdt7wkK#^m`n0jrXB{b4mRJ6_A__`F_IVp*4_8y1LO^lj5IV zs>R!C{aeNP#?J7kONyIwYvC-&SwWKO=5awm;6}^86as;AxBGF%f3d7W7Z` zqCgv(vlznx#*+*(;2&vGR(dU__Bv5vi|}ibnHdL~`6KYi+{D)f+mZ(5sr}^~N5j z$3>HiDsv64@0=YpXLBL&-tgHh#l^%LY3ugi7)Ryw-`K*2XJp?g5phlPx*B`M227J) zsTO8zvzpx6n2q{qx%kKCrzD2ey>~E5z% zFx?>?(hU*^X(XgUS{kIJySuwXTDnU>B&1uqTT(hD1wrz^@V=k-d4AXRe(-$pT${c3 zS!>QQ#~f>pP4jLm(>L8V&y(g4?q8zp7YO#~E!Bf?cMn!I5`51+= zbQx&UNwVF-Di{&=>`rTOe9i3%D#z`@6u&6p_|6w?-e!OZLEMDi0TZ!HZF+{GYG(n3 z_sqc8*OgHzmQ{k~68mE^P-{>MqwXm(=H{fjbNIYs3PWA!>97yZIb2d62#6p-ICmyI zTAFac;~!vwmBXO!vkRSVpzf>PVt@5zQF>j!FwF7h9j+q-eOQ1%^7Pcp`>?2A<+y)_ z`*`jpbWj{+3p2&v-SfPsr14eVvH9lLS~w56(l$_y#TWLIOhdZMB%y;5cFq`$pwn^Q zkC;2ELS^V3Sh6B6OUJxeoYl(Rtr6pzaB?g~@T^K#pqzBzJ*1%BLvJS)9Hl@l*V5+k zf?NiXzow%MD2zM_xZoCf-M92CJftT;j#Bk=+cP#VS{mlaztOYqm6Ulz!<|TMVz|ZAinIe1_9)OCn^&(b6^|}?P zuc&}^Wp47!cFbT`+HtHvEAs*$UHKC~Eym+L;F4%&<{cSh5chnDr=|VlvVU`@EihjDq98i1pv)I>(X?bx*#2s$`TRyZmtkVOJ#Y78M&ewtuWYpVzIMmxO-)G*zJI*DjW34WHmI_;4?dDzC>;xn^2J0RdK!|-l6gP! z{VDF*le`SWU44XUdAxbZRle?c4zk>*qT5U>*;L(^nxNJMsHfW5GYy(q!j*a9re~pl z*5z?~`1<+qR~;Bxf_4PIqA*8;{UM9zJKHVL0=dw`T|8fABxU4VN&GHNI&0K`aJBnU z4ojXr*Gt*AubfBl4bmAN!jJCBjYASF^hbKKXZUb>&P&46+SR!mn!_QEIMCAfl%cs# z^bZ3;L9HAg_oAZyQ26z%j?Y+;ljjcKsb+$RjC?kfpu{ZGsvvP+hS z5ZY~wklI;W0n3sHP8>PjkXek!&6P^6Qfz{)rq6}PT$|;0)BNx;aX`&10fP>VmRq=aE$fjrdD$x4i?;=Mh^yU`-G?NktqOK)cplxK z21%N(D|IA8=zhGs^&aZ2Suj^Qm7W!Ood`tjH<)%t}!0yInPjc!rZTK zU@Wm;3hf(uLAA-?`>%IFY4VbE;+}uQ46K^_yXQ*T*%2 z%Z&1BKyJTw**;#JD9ln>^wi^^&&6mbRke|xO=k0&{9LqfiMY1Jgw`%I09B*`sw`7g zv6wvCe*zAY(`n^R5Lyz|^CWF8(TE%+Y|DpC2*!t+|6;93rpuB*up;9zWU6^o{?ZZuRKThzZsoR9jIDIZ>rQ+}p8>wnsLVDjSN9 zEWebO?3v6Md4>0pITJb&FLus&Z1S6?hiDpZM&9yBcDek_Ysjv?u*FdJ`@)Ag7x6>u zF3BkIa>5tg8H6vuZy?*^=oX_b;GC*+P)xXVZ5q8lN2P9Z>=#gdUgSH$gM$j{Z;gGh zGNd)DHV5bbmPMAkdf^QC191={3>Drw#}XKsXg;Z3hyo_`Unb9)K3W~m{HL%;m!ag! 
z1MfD%8`|xdIo@d06Ouj(%$>sFBtetIR7`(UYE zy)P>a0^8u?)h7~*-VitNmA|AFr+Ig@suTC&7*E?)5ud~JRMeARL+ucUG_bykBH8q3 z-xj>!ns-O|r{*gkS~YC7HlcrHdJ2I1kg-X==xJCS63ML|!|Y7&BIh4P$;y*L3ZuDU zWK)GMBz+-n7+_&xggIP<*xH?JJ1FUyC>WZ85= zOjK0<;W7Tr=-HZNdd|Sa${aKJ{D@>Mb1Cy6zlI5f^`A(6&3LlBb!&xg{*S zl$a3Hf*cTk6?heYTv~ig-?Munrs2{W-Zpw&hG(%zH6zfaP6v-O-&uDFr8<~cK}pfgTNe2Fc8zl=`c*kS-dGxBhZ!~4Oi zF;eM!j_iug8O(G!DrSPGnp?Pe_JKh$5%!-k%mSThD(S zv++-fib0&NS?9CY*`87bHug{sdT(VNYI;ezHVoMK3@S+5wkK?Qio^PI=Nsz0?*=YX zyRp(rt=JTiglV_ z)1gyR@S)Be*REHSxS=p>_BpSyul~$hFb9kOHY;&>;S*ebQcioU!b`G%E84c)5=XGu z9BjqHZ3}o`xhqDJm|I>!NSBYe(6Ur2)3h#L(626tuz3hkI|VWUYz1uWktxgSn*-Efgj3t14-+w#YO(^w zfSAFLRmym`cJT}jCO@ALz4w{6LGA(-9iCJD)c_hVviWkk$tp?=S#3>_~YGJ0VO1w z+0P^sr2LXIF5}x_$kC~tVJqH~xF5I@KSsqt+`k_7cK%{h>zdxrV{(u5UMn-|b3VFk zwt*8s-29d(8#{t3W|V8gg|J9NQ>S`uQ`79Vn|rv^-GV&ZJt=xf!WRoy_ta{t9GKQ? zj!>9Z;m2TC?0Z{-y*XdYRw96k&skgay~9qz(-JY=&m1wmDr?LaoW5?he)SS`aN0@b zY%qL3*!}Ksx9J3R!c$|Yh>yuZTbsPHN^`UBFTa+K3VtLzQ1XKDWMn~-p-t^fFA)qd zWObeY)Kx+&e^bvAaowzd+n7<4Q4^U1k1^0(uHF0DFHfW_oIjq_`&CW3&4Z9aHu>E^ z=Tl0Gzkyuh%NY{eiKXFVNO6ZG?UXhuTKG5{c-QlaX7*s}tsJIwPx#c|L!V*0V>TaY z>HR`kO!KqZEsx5az}b3y27Asb`O3L&tMHv;JbnTCzTugqRJHh|e{W_%eHw@H+3Qxm zwl8S|)9*DZ_2$1vs)to3XJ-egv@15z1|tn0o9`$PWDgmdfkgLxA-QE=Nzq1HbKV;J zeUL+FT3_zn?-a<3!P?ohtp)75V;7t=-K!g)wnv>u-S-L&Q%1-4@z$g{Fc3G4egTua zpDM>isFQ&)T-aGXCL_|$q2bS#hXw;k0vOKM)o%!`z!@U?Ph?vLKqAv6l1cL3{ux2f z?)iC^eA?S7gE}rjhE+@p!WKrO%mS`7j&B(T5c+=b@nO_JE~0%7mF4<4rs|+zd{ytc z_al*zf~x5wSHTn@cD^(LV#oS8-7;G>)#?)$l5xaf7mv|f*>mp(3(d4N>q}a?#Anq) zzT62)v0p`{$`!}27e&9Q^YUw=iV@0I<2!;6(d_DSZojfzkFt`_o=EEKz8!ydRkCxL zY24o5C+C+)V{Qiyk*|MWtyNE9euv8#_8?au0i6xKR?FAtXHY*3k?x;_rgc5~vhu_!^{#pU?5J$CJ2D z8T$~1R+HBu$Plbj>~ATN(&#p%HVd#&?-jd&=aFVfp4+f|j$MX@6kpj6^)VQ(=7I%& zU@F->vK$3ode=O_IkZAVWNP1#B_8+B>jai;VqQxlM}@*(vDYun@~5CUaogy<==1UL z*3k$%`Ltnq#mt~e!j1`F8*}*gp|PFR3|~HXZanA3-MEi?kr>0V8iKoBURb`xP!d+| z{`FINu^i$n0X@o4yS3=bxAA8mmy650X5%?=yAZs@t`i+F5+bgW-B(z00W1W}5(#E#$ zE$q3+Hp_#HuVcZ&@$ToEMXR$F(z-SH+ez&ivN%G|@XKS<`ZAkZ@f~}{(w3^3B?Xf> 
zdo`<71$)L2YI%^4T!@GN}9B>7i@c&BjsM7L0Q`hXA4bmSs%c% zY3((Eqj8U#zqWook-2-D`NOc)-1Em=B;h;dIf6{im+2o)3Qvn%D^*>0@CxwpNQm)%zvN60*n^)O#9n}yfZJ|(UF!ohaIV$Zge%1 zIR0j7LN#4f;tFnJ0*s=3RezgKF{$cI1NrCjSMP!pq?qCmVNHL_do8p*!lps~W29@Wo?h=x33Ezn*JElZHy^c=7OLjfkr zv5C{Hs5@0k%^fy|4^2Y- zBq8lozyznn2M|u90$$p*TR;^LT(52s@xl8eRx-A{b4RQOpaY6^$O1h}`-kgyH=|6b zAD^{fd#^pOXNGDZhP9W4TFa9~h4`qGcdZWW+kdWphV_#~FZxJV+Aw@5`TFul7<>T1 z>~BNu74s4qQ*`znZtM*=XZIWxg%DV~rF*OnRsm;f?<|t$fU3h{`ToL`-e2LXK!f(l zMGWPaaF@*`{4$L()5AiIP#Yy){23SQKQRD;|C*mK8a|$i{Pn?(a3y1MZ+J z2lQzn4(!WB1u?em7tH9AR|{V44V5HT8cxgzP8d9z1Mc_MT6vl`BX?9pAw4EPC)EhM zwPA|lH}T+5wqSAP!MRq_7+8rwRp*EgTV=x=Fc67($*36$(07=Sm&SM8qobizT;X)A zO01asLnJZZ$5S9XU$gVkcPMq^;5IfSe(&B|Rev>fNNnx^Ag{eY)ZL=FyIa-I_VN}- zVo>O9PQCV&ejSwMHhix2hLg@s9mcv^ECwC@*C*iLxXb1STXFUICp6C>yZjQ`v{m6M za-sV*#CREL3JZ%RB|i@~W(qL|!#xrVQoQrJe2Lax z-YcbM$L1Z;1{MnsXSDo3$VlhVfepcOJe8jZNv%%){C{y?Be=cp4S@(P)#N%&Q9 z>Xn%2S4M}-o)73mCpn}zxQA)!I8i~=h)J9R?|2hfuNW_?GnWz1oyoIEKDCS2rbI+c z1ra)+WMMqRRQCLCt~|pqO|Tx*gX<}vfdTtRNx`iolP^XvIkCq&dfM>VZKcjJT1K3W zI&2b?m>{+102ss)Niw`d2IhIGHy77q@|_ujN+X- z8TBhkICZF27m{5S+auU@YpLIuuRmljGScFDnj>~sWeuTm{p7D>zlL%@J%X~jdu&m5 z7)P>FD<;2#;zcH^6n_4xp+R0Ft3de^WoHEtV2ynMh8ZPcL{LEHDOXWcgquE5kq))P z{rKHhb1C9dBly$*1Y>O}#}+4Oi8@MuzH3)VJ;&y`TWSTk@{{)GUWfeS>NWC6J;X~R z7DH}V+6fv+?Z7zTbLe8$^a(m*bhO?uFub*h^=MS-l>_|$24T|sqURoU<&A$;MV7|F zFt!i9n9^-aOR6CDCez!RrFrfFhvm1 zEJX}DH6%OJt1mtK$Oy_i@!?iny5Ek%JbPvjla`pWi=|;w;R>wQaV6tL@jnTMZPW{Ir zL(%rRwZjt2gD(jA8d)YK9CPZBhN|7nHoa}Qg`aVzy1V6tNbxS7rJxU3Zksm$l1Vomi(Cs-5;$5% zl(PsMo1dBFx-IY!7HDW{il%$eO}o#86b3?5D0KMvjkBotDGouiWFS)D!)zdi?)5Zz zI!)hX$TreT%^lN*2+;m%hCFdxT1nIV)i0dh>Jo4hxfQI$))?4)XrrEopTyBDc^ky1DAZ4@U< zG|w2rb#g=$6MA0=SviW)fXZxCq^o?kW1wh^F#OLxcY}(diR0?H4K@2iM4zMfXH*8l zb>22iq3E z7cR38K!Z?#KBaYf#?on2Be7U3ij^&_8p;Ofn>dGUUZs;3eY)fxBZLAEw3fvk{qJ~j z7F_Cqnn=iWmC``f&kw_)hX{Eg6(|8D4TZ49(_q}(llITN7~hFA3=alK3R8}I!;m0| zmB@bol|f?4yD!(RHNZMS6d?5{SJ)r%;B>e|AjlCE_uvFA53eZ18a6PQ8Vg2bSDGJ* 
zUoaXjkcu2>bWtm9{}9$np-EnSt_=sUGq+SB+${Tv_2$NNw8_*H59)qPKs%x5_e%fEr~ z$01{gzx^w6v~E^GKk054Pb7H5`QtK@V)zMdSM@WhAE4iHswK-AgsQ5;BEp4ClrQXc zpm>z#WC!?KK7P0?Y8$vffKUQ*2M_~!&`TUpTx^@6yKE${yoVTHJsa6y{WIviUq%X^ ztqxXXUg2!;a+Il90}iOwa(|Zrhvz?CBb90X3rv6IUKSO%-iKK&rn(y(=xKn;n&wNu z02`bGfF2w)h`?-PH|*M*fZNV=err}Q-Q`4PA1tBSE(xAGG0?oowAxzXGeLX|V77JZ<5l+ugL@Twn)^pM!Jn7oLDmNCocOtg z^@Xc!o8fZ;R|op^EE&nq+Wm>1+KUJv%WUSPmRNpQJr<_lgi~q3b>2U_L+yPyybbER ze`o=r&ZHtR?wE(;p6$LSR&8sJ8^nKPd~gP23VM3qkgXH1ED&gXaHyUQ!)Se$_J)L{Baq+&hs!50|!2B$=)InH*!q540hIu zr4cDI=g8NsINYGkV>%Xp5;zJ3UHzt1>G{P{GIYm8~p7~M~DJ*y_8l4 z*RH+*5wlKA*GuFG+BW03BA&eNm&9n*&KUhKO(r%jUbgZ~33cvCe^@7BHKKzt3pWmF z;pO`o+7d3X64LorrldBT*o{bV+9+%1TBwk5Mfv~$`5v82ph;z^%Ai~R!T-ix8Qlzs zQftlek`(xsd-O1rIN-;oe*5tP7FJsOBboF`fb%!yByp&H5|;R}XMHfz^kHA1-WtQc z)etvNe5Yd`yDI(KH15%70Fy%3eGegC0b>OUG&FtLi>q}g5Px87No|)(`M^=UHqE(0 z@wY~PfUJ$ zcZ>1S2E{ns#K(GE5sgDWc}okMd+Q0+J;RnAx*(JNe3FbtWzadrr~SWca{AqTSPD~_ zk)k|WY7URQrLGj38+~|XXh5FF|EPzAQn7kks%g-xtgfvG<)XHIL01!u)W=S%%&927QIRYf9sQ92@ zUz!ZskBPx}8GV8@N&dIs{R|XCIs2(O%8`JvT8kXKI5eYZYkL0mj^~{HG9BQ zx&G7^L1mTzc89EPSq*uYlOL!$9W&enKEFu7cz+4i`IJh4pr{Vmyv5@1kTshz^uL61 zIwdt|cc~t-p5j=CHXX|!>y!hxA2p9yd=(GFF@+fLPr3fzyO$#!$kQ^d^g)t`a+-1y@wi;` z`#(*P4?5{QFXrLRjSfaksq8jhpn*%7&ynXyD#-KM<)?r*bqtP4HbIqA8OHZalhqW> z)_kc}Cemt(HJti^b7g1XCfKuL%fjR0FQkKiSH6gErzv4XE5Gpz^qbJgICeLuP z*hH>~}Tg0iKmL znz0XxN}U9rX&Ce(d{*vTv_tXoJ=Buc6`>^#314Tw$wWHF0Nr>}kL)|XkQSjHZ47+% zZdoA9!3JmGlw2P|-u+jtDbntF`+|AXrnANoNHwK)q`(i11ob40&?YJ5N?)}8=!NUw zo0I5>q*wAc4>Z5m8a`o5&r2K2;4XN6bpPX4cY#@m_Irh+Ge`#B1lCV|b!)owx{o78 z&+V#sD)~OL{fgo6IsJA$%HD9X2#`nRr)#u|c;FIFVw>`OVCyEAL|MXCC&d(J` zxL5v|h`)CWNl3o$Qz8~LkyO4?*YWmUY~HLRVo|f>I9cE%a80{E{PzFzeB9y|IHR(N zE&aRks$c4z_)i#K62q(g_SC=sQuicJuLH#d^U%~x-G#Spbe6-}n= z4-kbTKY^9{7+1|9pODt@I|K;QY)Nll*M(_GJsYhueG%&Gw?_)mQeJDB9#=g7jpz-d9Yl0pNMVSratRSM zH%e*NMh1o@UCuETtYEHBIVx~0N#lnV@4>Q(YnJMWG>dgtC^=a~Yq~kT&KKpT?vR<2 zmr7LY|DQ)Tb?@OENYv@lX@a**?ens7oy^~9QcYL#tkp>`!`pp#CvvY-q;nT`$)AQC9KUe{8l%bvGR_k(Bz5+G=B7~D-;jr#tgLh 
zQek}l*${hn{`dS3j_DD?luT)+fesvzvxtCtvbQvcSK#2w|1N-`!rC#sVI=N>#8RRm&m2>V^yKnbW8$iVQz?^nVF3(CduHc1uWqkz#j-N1Dk*u1oZ z`Ax`)hpbwt3tc48hrj4=aa-%rO#}8r+<>D=A9O8YiRwpbPM}Tu1YupV?@i&|u`H~__Q}LkB?mo?{KHA!_ z`_cm}q!LDvOuChQ6-7;l!j z8psrTkyTpU2AfGc_RhdPzyEvBa^Rk|2}Y0zfP0blIPko=b$@!(r*aYWa=JhAC2-O$ z{QBmfk1xai;nsIRvU-%_IRu*ve5r@uyxAqv{KsN{t2gYPY1k^Xiq)*J@3HF43kV0x z7cSwy*B1uYf8BAQqxJ$8jOYX}CCltT8!Pewk3;Ze zuSA2XktJ5B*D#RG%2~LPJonDKcbwGY)ozax({QF%>|jtDPIVG6Dbnv`mi6ewm)|R|^P3 z^8zz)17A+)T*`?%i8h$pdcuBd_@UDy31_1AS42@{)w|-(o^PBPp;(Do4CeR1mOgj} z28MO=J0!%QzocRA(vtcrPOs4SJtvljgRwuYG@V=_sP@5-#D9gi{iGwEb;fUQ4sk8d z9cFG2-j9`fmYh=_yOUAcj@D?h=OQWGfZHhl@23B`d>!4~8o+oKT;|kwku~))Y8RO6;n+-90BVcIBL;507U-mWB$Oje=D}OHx?u(B6e2NT@WDwo= z2bu-sLMk$DW{62+`dpAY4~oT3D2BNxfZr7k$wg0PSG>GP0`2%@;w4XDP4s+wwRu!39#M&1 z&#aTxf9`pX$oMkc*EwiF_v?)qqkwh6&SP~>YH& zlvWYH2}HOkinf4YQOqe0ta-QCFsOZEjF-a{zDL8SNLnV}2*O{!>lscX+eLv6?%UWX z{vz$ojsI1}D#CV269M(eHIh}2Tr_!7Rmk=rB+P~rpWx}ELB3(StR50k&z+Lz#)*Hl z=DtVw`A?Li1#@NF0Y4ZfN272g;z`3CugpN9+aqF16s+h&M3z$)O^BrwYUNH;`=wFz zyR?ym15}>bhihx*H?8}v0%N`v;(J<>YsG!o+*6t_OP8z*zPPU&K9A|`efqGuU3Ph& zI~QJTaltHhV$RIJDQ?xdYxY%3zf{fr+FvGu} zf^4m7y|We#;TGJ3d};JfhCWw1(N&n$mV2t{_W5h6+5~s*PZ{d*dvhGR2}F^cp9FUr z9}2PRKA}mQww!}iOq?=YphTRaQ&JmJKf7P{TWv`I-D-~%JqlHk@AJVmS5C#80kJv? 
zHr?g}SurBMhVJaU%EB(ymGGK@N%hAF2HXRr?`svZN55cd;vMs4w=(CU&mB7 z2nWSSNZUmn25=BL^xvNo#H+1EXVcIH2{;Q=KyuLbS&jpT9HM^lAEn{K5eBcWJyZrhN_ctte}lj>^P z20JDLqb;Z+c3{r2lu!No8#zRCz~0g-672DKAK#eTc+K$m!}I?G;AEcw9FIwhezT-v zza@)tQKQd;Gn)gN!gg^Ouq!f10WYYF`Mg>If~9!q%y{_Aw~48`vUfnuH5KD} z+}>hJH7&4_A&+v01{P;vr@(-DL7E25MJT&m#>N2;C{BCwY5W^U54-QDR`on~sHCg( zjoM43oHvNSV7&Vz*+0uwa|AmFLX>~9dtlpsz7DDRpP7UqZGHb0vkDi89NMD6f`VDk z6bL22-mV%0;Akl$q@pVB^1k(tVs_nI1FP=D*$djpUAQzNqV203g2lL9dlQtJT>ODm zlQcBLI2)A|;ST*UC`V;Y7K|+YYO`+w*GjWp;rCf}IueUc=_s|f3`wA(928y2Rx>hG ziZ?`*KIdJ+nmuBLLOYd)LV*$AY5D~RCH*5BM?w{9NKd#NjP&wakNp?46&Lq%rpzk; zARS@eA(b>Q>eIjFm7{qGr?kbL5Ky#QC#)a3Wk*f_|onr#8qpucXH%A42B^KUnVXNY0 z)Dy(nBQ5u(~tjpgjS4>G{r+o!u0yj3JcX{9+0;B%zW_Wq7$%;EP^%X3|q9@IS; zDZRY#sY5WIrSM^a=@LYENPZ^)9_g#8C{jU~`+dXRw~w~y<}U6&rCKMWz5XoOtXol&r|=lNqbMzC$x4wcI~ zvsYQU(|Qa*6Q!B{J57pa(RLURouGh^fx$BND*y%s8dMp5(wN-?)~MTZkonpLr+!Kw8^cr|5N9VKV4gkhy|>`dGC*-{ih>>&K<5gV^4Lcf#<#j zDOkhcFH}xDAFE`9JGZN{0B8yp(Jal6nLkm8n;&hcV4gPYw;4e>%s#%zd{9T{Pp&j7 zan>~Z?<LSN7Q(n4R!xF8>$>j5Kh~G(C4j z@UlrDQ4ai$*+Hw?VA6m04EHOax*fKi#=n)-R~Qz%MVtNePPUC*vTCV47?Mg9I~L;9 zR{0O#feo!w!LEzP45dG9wm%no{w34Rb|5a)iQ%1{TX834#c1Go=~LZ#`BZmS>y*&8 z^7ContmjG11Ir?VN+WMed$&8{&W}mo(`o!w;#Y6Dbk3NXkhb%zOrZXjwgtb`Q>D0` zLXL`#uFG5uH?heH0=}O@9e-&fZ)89uL4it@Gnz!x8Eg5^d#l**;A%r z(Qwm}#K~=2@p_TXBg)oHW&{k}x16ot%h8?yite0#%KxtZZMX>Rz>S|JrSI8fMd!x&pkwF8IRhfI zUaT$4u%ty4AepL=!`iEyoyR1bqhBSQ&ZwvBS}zhsST@Q?mRPjqRySeCcB z6o_`rP5cX(Gi|G1wGI>e{p$X8IU|@lkGk2Z745akwK119Z$a|Fja$<3C9CUZx9>94 zQnTX0!y`F(aG=$WU8qnaN()f|_=f&3v!y~0vo9&y>x@3ap7`tfOWJ!YrOpwp>`fh= z(YUWDBf`dua@T1NUoiHgXALJezgV(?a6iYgf)RM>V+>vNr^lTewab}v}cMmkN zeGA)RqM@QfVI~hPa+#qZ1U9j_S~-H+oVIlQh`?=`2A{SdA8)0mLu)Uv?%cqV5-SrPFBl)uJ5G3nQwP|6*d2Z$rSxF9)bVKZoMwf)@`c}z zOCK%7matWk^B(2EpGM1mSf9jRrb-=Gb)Y2Bu)jS?rhPMwzoQ)x&rGcF64mhMD#6BN z*JYyAy|VP=sc7+Gi{$PNtKDMMH#0BgVAsh7Tv`0U-!C^1NW;iu(tjtg_nMMaorHYW zzhwe6GNhOHPtk}@Kh^bvQYMQeJp|(9cGvx0Th8jtj|bo!>xrI=Tv15DN0@)|*5AB) z;{d3~qdVpJzWV-2*8sj9ore=r^f1$DyhcV+?bNWcTo)A=so!7s^#r|KI3_S8#6J0M 
zLVYUKx`&=9?z@(d)fD%-uj@uVh1MvC&AOeRF0)6VRH;kLghE1b|K80CWy=ATIu~$DTSo=}=DMyg4}} zKc;HUE;zhg*;>JKLO@BiED<52yEvcq#fSfA$N2eaRw$ZC2ymelFkz8~BEZw+D%cI(HWNb~#H+kVCt4fk+sHrp(km)*Xr zPiZ|I3d{fE-TH7m#Nn8s>wyxuy=^5O_D!_;@&NAAt!kaQHvTi^N>QkDT&W{h+EaYi z^_Vm#mivH+%SHIDwu@myLu%}8@pXH7m$bt-Xm;s@Unlw1dOgi=U8jM-$XX>x#Cx%r z@Tkfnx{Ce>-Z(-5hyFu)_x1ZPnfi+5U-W8dI4!asHz?!U!w;*gEbEv-T$W9h^6A3f zG?nWI*4l2>uFO&-&bNbEj-A#+nP}|7`!|%ddw(i;;NO8+>v44BqJ($9 zL#|KLKf%xQJjIuv??GS#h8E@dT5kk0if+6|D+kA$P5BU3Kj#omO))viwJ%71x~OSU z2PJQe`u^8JhRWpjs*fj)Z8cfM_#{nO3>xXNu+9cdTq(3nOs_?d@U^=gRqkNZ+&>aH67aF(VaB8sH$( zC5m(OtM(V#o>lv}f#s!ixhD?5NY<>-eqC4dIm^srjlx1V73N{?@!bBZ%XU$f_JvLq z^vwjvo_46?+c+V_y~?iFiXxs65!oNlz3QmbhFi0CxV%;O!J9-F^q~|&)1VxcE(B<# zcGwNH8tgqFdA-sCI?;3PjscBxUQ$aTU?AEv|J7b)w7oV$$K$URCxWo8Z_v7r1e4ZV2ZR;6awUaM-*f+ij;$IW;oRf%$$f~Lt%#~US zhCVm~?=Qgic&f)~Wj_P3G5VDOla<5l0S@hO!9DD+@bY{P`0Yom$ah3a4Wv-mcB7M_ z7lWn5Ht^RR%_#OBD$s;HFV-IB_S1({UGX4&d+Cf7@+l|->$h{9=EHif^C`MyUnf&Q z=!~6u!<1D=I!NpTacx^jsn@=@6l-^{79woxx-qzbiNXF&)r{43X=NYy?X*2#Eug`? 
z&Rw^l=kVNJWwGHKz25Jl)~~mUOkzrZocL8);~ZaaZwq|B${2&0MfAl_^y!}0Jk+Kt zi#wWLB*t@5=Un7>im4iVM|L8bCF4E!@%UJ6c-pr=S1QMTB-aE6E604dpwyhL^^-Z+ zGOBgYOO}zB&|K6oy^W)^k6_&MUGkIvxd(< zg?(Tni>_#MRC(^q6uc$s^?)^c>AoXCt@4NskvlgdnlOhtX{{kkN8K(9Uo{lW*B{>m&wtQPsMbDr__@w$o{*TGP!np;xkc74+9G`Jwe(_llXqot zyVn|RAX&jES!*ASCY{HeUn?Ku8u;GQHe1iHB2 zQA-z{6yZBVkhd90pOxyvtfu2Mf6(ch_%AJNABtyBs>MxQJ-?$XH|!|d@4HPDcGdL+ zf*yrzuJ2c~ZqCnSWl#n(qv!nn1<>9d>IjfU6NaIo?2%Vvf%eis5b{tve-$V1+Rsa% zmZr*}c}OhCtopn-`&3F7C1ysv!3QEs`;mqw-6;H zEVt$#=d{$yViTCtg0c&XnYsmR?)bbY=LZ`>NqbKzEfg-voxDdsX{-9Am3^j{;)p#5 z-Ce%o;+G2X>60G8OTUZ*0?swxIGEL@#ak(9bTJ1x@Gq5aS^fN))DQRB+lY5*Rq=fN zl7MMh)2EL4{qyx1XP-p>OYOyq?9&iC8w0g+K1-?aBA)xt;KNI4Egz?=pX37H9WUMQ zucst$yG5#6S8E#h8V~5TJljjt)#nj91^6X5xz@gar2_|GS#^hMP#g^Zi!j&IcpUDm zlQM~l&IxM6$G)b~nPDtnyOi{-p1krZB3tkg_}9gY9E7|@o4$PI4;lic&P?V;EPbehWj5$E&@n$a;T5)qx}P2>wP4^f->;{hIJ8P@&MkZU{|Z*{nWQ3zy2g_pXa4v zB`MyqwS91k0#9sPkUejZ^Z3PI=!&vtxSsRTa^+0+2*v6jM@-Qed>c>Th|&Bb8Zgo# zDU-=_gLdBihxyo;u9182K^(<6Uqy-_4@|a4#RBSHz-?fHP{#xn%>GF%-z55|^6?qy z4;MeDVW;1I#`dhbjx4_78CFnRC{LavBVu$Ssrz2a&|@jE6T%?yZFdN*Hl}eHtMM?4 z?L-Y;+|I>(oRa7DoW&yk!sFpD!jss05>sGQz2`;dSDmMj#SOk`Je}I7@k*!)ZlDw% zHmT)210s+x+GeRGk5@9Yd;Xq#9pOJJkUEXW);cIhW$hlwW~yb5@2iW>H14m($}O|frwf-E^ZHRTB^isOei zHofX)F8A|~LMm;cVZP9-j!}izNL5ei%O13nyQ{EnD_~dbjIWJQs~@V*6R|hDv1i}= zIaICEZtP`%VVctCg8X~h+V2ozD_l!lD2D7GvVe8gGc|EU){eV4ZHiP(XLNESWb)q9 zttEg$=lm8ID7xId)9PjND z05|Kq*1T@{aG=E7dZYx7CeSV&mJsSD#P-pE=zhzBWhwU(-XmI^L6v)Uc50Hp85EKW zz*uHe5e$->b%Do%MuneXo~c!=?+ntX{io>bEPLgQxfCu6IUp zkgqE;#tX=RP5|85vYk%@$Za#YzAek|k_%gA5Tbqrn}PLp8uWOJx5jUheX zrj#^5D5&N3{kw%hS93G%;I32$6c{_%GGl4E$hhfgW@(VhJNa$>pV-IgaU~RzEqDS&9tfUJ1IaE7zAZ6cx)P>aj$S@*SO;-_zXNK|UFO4?LQ+L7 z8PnNU+Nnv(hNHKkS4q?G#?-o*`r}C6Flx1v7wc1p} zJQP%p6qj=q&MM5S%M$Yw^%B2Gl^nrAo&Wy#9 zv>sUxQaCNn!oLox;fc_v(y#s2X$5;{05I$5Hl`(oTu#QCW2~H(E8PjT+;+d|&;B** zP>1>urW9e9<^`Y&AUG5waq)>zg%>!hB*z$9KR+$WKiD>lLn23x!{$oq_eY+p zpcQA7Rs5N$&pS{!RtrycZwu2r_+CHiEG%||R9*4ec~~~qGVIc;nwD=XG@^tVG*DKX 
z7OHNh+dd5b&<);16B_c>8TYZ}DV?UJ9iE+HfmA)NypxtN^NXMlgI!u*n@hvDT))p6 zCeTcw(Tg9eR(s_<)z#g=Z$Eo;Kc36UL~R;HDK2Ukr@C@1;Vwu30tjW;IZ03#gEgvL^XmiSwb)li8#u!6+?q4Paq%+P9Cx$Nq zdGFLAtV^#{T3%=8*A+OxQox@Oa&ANg)+_QIK+`f;ZmVLLVEG!)P*v5fBZ1mC#OLDH zWbDxg`nno{nc6(7=9CtC@ap>wC4bC6D== zZc<8ue#}75|2K0&lXyM7QJ*0e%fOZMlz#~66?z&`VG0fAz7ko8bu0{63P{+$6B){O zXySnxQpzmgfR`!adtjYzVWMcT2k0RtZ*E8CYRw%}$j2-nD+x5m~zJ(tC^L)1`FlL%}k2 zUmAqS+u0S=uG33AJSdm{WIO~nuE~8H2jjTm3UVPx%3-X78PR*=AzObwR%B!#K0BMZ z=1RO;DQ{vjyC?&@!_tR;4C#I~6929A4{JnX$2dhJqEF3&KmSJ&4U1}@J&6Qi7DfnS z11L}HfWQM&8XVqMgs|s(ms4VEUo#~94x70y=7&mEfKvfJ_8q7Zfk+j0J)N|b;T}5x zc68yBTrsBK8*Y9_f2{6YkZO|AgJ_1C=#*OdZ-^u2WvZXfnNvbu?Uixbp*8=(uEO&v zB^C;>7Fq9B$LTcw0nRj}OwVMj?*Jb->VV0D4$^V5TEW`4&MCF)1)yw#U5ICMK8< zhg>@i1-(G{>CGj`%1x5-@!Jg7Qf4y$#n^Af-!G zJ5WO@JQ7*ETVB2*7cN%`X|l%s?X$@Py@A`+88*A?!zd$q7odCjXTKe-?%sHXyg>=tzDP|1@!#yz{V=lJ2MD&H zHz=rA1402PzN^cN&luyrQ}$mE6F6=emR)|C%^~Vp)a8jb@7(Fx_)4{U;5y%>uQW~W zWNKnT3oYxNM*cY=bOCp${p;=@4VsdGm>KZ>JhbRr2l9ylxMe zXe2wm)_hgxgpgl^53$-gq`zHnd3ONwy$%g<;c()(w!f$-~E(h1)? 
z{Z0kwcNZnsf^b5_#u(^P526Ytxk+a-tyI*`k z+%49z0}wZ$_h!!4F6lHN%0;*Z1{0SOLV~E^>}PAJ`(up6N8;^%4Wrve?Q5_bc3+63gAtm*-q_B zU#=)$v`Oe_aQANiMKzTm6YWWiq!OlJWNM3!oN5uTOBUTH?KqT)lpXy~1os;n za46g#Z$Q7gQX>!LiI{CoK9~J-LMyeh}MW%^{*oadr)lIGF-_!fh)2=VN zpD@bY4M19s4PwI}auDS{x7>0~Pb+ORdUjaGBb)hwyq?Gh8|Ozf7sM8f@Ea>Hb6M4+R zEIdGzAB`01B#$0>Nt!79W)G6MOl~W5<-F^3cscm^mCQ{wuCIMY^w7Az#O!+myEP=x zZ2j5JeRVaHI;8LESe?X7&+auXph#*Fcd`-V!=h+&ZDF(Q8M7drhQQ8Z?N2{u0 zFrw)+T#Cnr_B(O-Sf2wp{0zY12e6e|hn4H{1oZL8NPtl;8oE62rJXq!n*SXDEDNqX zuVlq5nU>vV_woIpLfd1YE5a^B;J~JHlmllH=j(^ITvI!UxaHl*g9mz6-l8Cg@k4Vl ziYI3M3&@2|&TK#KZK=yczRO~`e(l`e(**b60WM6)_tQlB6%#3kFqs)~6sgr%=*5G< zj&AcX;G0jsqOlmxLzJ5+5hRyu?gl_Z20RnvsbXbmgRz;GCj^+&^~2^pS1UaN63c=< z64-qOjG$FXk1IBU?m3}WgYo$v%fMCvV|inJ!Q+_M4>N-vd^!p8P$-f5;`A4rX1H5N z=RpPMJ>vQdM~&n}M9pO?EXQXAbz%jFUzN1BRCswQXx`SQn)|k98+Cw%We`bK+(lnY zea2j5mTBat3#@~k0bbn*E-db%x>V8x;2d;McO9py^L2gl-=w_gtLafm{X0d$0mA%5 zQsq&>jM@UNA)Tzhr0<~u(aA~+S0)y)?bmPaHMOE za5{VX=aVRU*XK7BQKH)Rzp%Arl{EnE6ieonkhoYSO{&)%e8Uf&Xls#!<>G*D zZ*^!&VQPs6tv*w#EzAUHcZZks)175DbT(t02nt#;&aV>Uwviu8tN8Kfv@8xvi+hcv zZ#|~^LATckPxyv-FJ%8_zL4kY#0r#s+&2+o&Cz!xzf6CasF}1Tz-FYNOP|m zs(rVilDUA;#0z0nb$>TeIniF0PnG_LMK<5UcJ_+PGY8`*&W&9sj`}Zic>d zX{ho>JW3Wlc}Dhw603t4wc{JY11qdYdH94WE3w1 zxZ=8$jN%@PWauiOSU1k6OWx=i(o==6P~qMy)ShOUa0aZm;U7xjA^m@^V3 zu_XeOk+!v*hzPkqUnw=nJc*wLQoQSd9%WJ=C&pN-8QL`qrb^m3Ud>5|UcP?GKmT(_ zZCjA=c2LY9(y&-s2$r1k2erzg0kQ|>H_$pZW$r$RKos@VqEDD>#PWsV0`{PVX*Ed* z-Xa9V_)l8^5d`%yd%Acg$_NLG)jZf)PvG;{~^!$4GX-I62wDi z-is-fV_mSQ5nc%G2?&*dEMRn%(nfXZr)jdR-vkeZ`KNdyVlvIk%zY7kAfXN<%R;fl zttnL5NQfgKQ6DFf*VN=l`*@$kejMWSu=V+<0?5t)AUhr&m%}K0W`Ehv*R3!7kcGb? 
zr2c5JQYmF87b~3Px5S?3O}!Y{--aHzc->W!ET#QJ8b1BrJs;nBi#wF}ny>D5I`fmbEQ)!K-@7z-3eya4I|M{5M=^P5yXWocRT@Y2z-fh;Y&@V%^$VGdO*uWwM zHV!I;))n=680+($!%&~c9D>E!`|u6WMZ;zP*Nv#b;%>|3G6-Zzj3X35j&%)u*bSSBDQ2me=UJD z+nF`5*f&qoO>lx=U9#Lhihx=<*F|q~{!B0bY9>?(^rD~nV~v%A@cT=g2fxZmzLK%gAgY;srY5EE0$BBju$9T!j0!lUUtb?KV%zjWt*3{q2bR`}W}&7rvuPnOP`D;WE%%Zq<{lmA zfuhblSqi01qxyElx8DlIt*cFIL_3%qM7#+fGR9p8c<@1KToFfHZU(k%t#7(gu>>)G z?Zd3zgLSd*EA#nr%U{nq!jf_1W2QCPq^K;2Mw{hg>Y;+ryOnAXrdE48t1L}iEyOt! z2N`SYyk(sk>Qbq-R{~tXi~~Bawe#WjYRj$Y)kln(JSf!Fa7Z*ayfW@d^FONJFL4%w zna}?iFz>@hW|{bA(zRXQCn>hmYEg_F*I zJX&jqqWafRcZ~@Y{I>ph33EYbqc$*B=o~s=_$$Ds7K} zn(p-D@O_EYzWE$AgKbh`GJ`sAUDiAI-HI3obdOmsJdgRtqFZhdGH9P_EH}s@l(q&e ze;HX%{?8_Hii^YpTO@;EwW32ZLyBS_R;F(P8 zt{F+ZXtp&QnF(7&g1btlyIA1spYf9HfQxAt0c;QF>D@%7p6{5Vk#}ZO#TX@}wY}XA zKH&v2-wMTgIg^S8HjqD!F|B@O0fZ;1g7{CW0@PzlOVv&7razCvGd7c!c)##rI0y-> zaUi;8W2*@7I=5^dOGWukh;I%DFJmQ)4gAQcAn16 z8o_aXcP8HpjO2}l>62K$tG^x}GC|tyc?A9wcqB-B*WU|{gNM42D_-RGZDWOJRO6q# z#WzN595l=MhJ4;WM6c>5s_NXjiEp?eNnR{}yS04Enqaa&+}iu=u0RjT4HQM0HmI_? zxiI#zl08E5*m%)t z*d7zgH_W316$|o0O5Qpv(%bJ{V||3Nxq1jh*QuZKj1>```Fa8qiFRrEV^o>@EV+iw zS$k@*JWkstL36&X_}tsQ<9s8D{5rB2DB38EEDTmSsJX;03;%T9Tl^XbvNla+P! 
z5e=@@s-y3gf-%#qKkm=lHW6CV7IF%j0$EIvoQ0vyp-NWU{w)KJ`MkI>&Q@t!S+>?t zVPhYii*ZcO7r@jnMS`MvLWC1E7ab`p0awWtaIZ0+t?9xJKbk2z{{yt-zH)h27rx zc;$>qmW_{Olxi;Mu=41c6KDq>p3!8(ab5=F?0Z&5=-y_)tkCwB-AlD9no;i--q$S0 zD~kw4z=`j^8@tk*!jc5oNzlcl!%N2L@mPm>^LoapTY{JZuSr=Ze@ERfn$H?VXt6O* zwOCW2c8`xvY6vyNlNaD_8xk}=Wg@;~+UnwE9X2Gk5qP`tsd8+ySuT+d2BBug zgoHhW@ixGdH}4fOjzSW!#C#qpVKg3jqNkUVqV5jqH#Ka6Db+gB{Hzp8MDM;nGt6lnpn`G#eEw;i^ezkXNm>u3u2#5%4DL zEJEZGYsyw1$UA)=ce(slZd1TR4_cNNe4V{*%aR{+SAV`a5@K7HRAm%c&{9N0d*R4$ zztQYEVPmrAg`rTPHuYJuMPn@vR+cnx^6bV>N8LM4*F8475!;ejkPeVASX8E1U;$`M z&&0g&?zYh9!X`~{hoYy_se;(S=|j0C3dL0Q2A<@p&7NfuIO%@61A`)pvZ`-^5?TE@bC1SzaLjl%I$1-WBPS|1D5cmdte za@f);56o@KA2~;`<&?WcIR?)AJN`+f@@id75dOu8h&81l2U_c|&#Dc5HSb~j3PL-A zJdriQf9yvI-FQ3lM#!`YVCRcPIEp7N$)g*j{YlY!*M7$rQW`Bu* zg+&KP(-M}5CROJ(AD~aIE>XXn7tIRF-QGR%8;e8+Yw=~B7leo*=00W5{hwQW-sMQ` zgEA|Z!!Y^Ph;FJyD0{a%RI6Q@(WXscoIaY6*tk1bS6=aIpL<0@Aby$h*P5cqm$xZs z?sXkq9%n`Eu+T2yG1*q1iP`{CwT+)&gvQ#oX<&5|GDnb@O2IGBVJ8?3zrA#9$0V`x z&#$eTEfNYU+R>`phv@m^Q}zjg>8}Vjc~X!9DMV8YbGNo`_^i#D{_S+;UQZ5@mH+ZG z9}2NB+*ABTkH&NhTTOQ==LYjy0)b5=qZbIoWFELH&C=xaJuKJ*_*V7`qCQ9etNr$BzWjQg@i(cq#&8i||IAqcfoTLR(C zI!7NN0MBnch)00OV?1+L#kdFc|CSOOhVHNIfHG;LX!q$yM@imCqq3#HQfwOT2A5Cs zb1udqK=WPgVNk*3oA+!yddqW_kp>^%wgfW_Vkh^(lq*$o1=hHFDkJS%wdKHG{Smef z`;zLWaepZTay7-rpjoe*cm2VybR|dze7A2Nn7jn}^`BN>`@+ z!e;Z4`rE7Np`%}N*eLbZn0l->d#RNFd1!5fazn&s`>!v~o059fw@aS|!b$FCvCcd6 zD0khh+_>|1BZ${@-NIHhOW<&+vK4ISiD7tO)s5di6MX=eD?gMhGCi!J@OD`+KXP4( z0y~B1S+k1~jk|hEEQY|uE>BxvmBIXslMe4Y1GHz5Zxt`o*fw7Jut>dBW?7^}v8l(g zOrH;BdZG~3n*7x%%Ip`qJZDR=Tm-eBR42W&<1Td7btW|W=haku$p(q_sxHIzC280F zd+YkqK7JnDp5ryfZ1H+|nN`2X2Hs9=1T9mJ@$oskJFydmxu-#Qbsw=d*jxPJN$gj8 z;)*9#t3#~xLWQEI@nAn{sU2Vvj)cpi?wquQKQx<_?piEa#>J~QF*xG1Di-?Jg~r;* zCpJyv8#j?Svqu6T^hC_Ugv89E5C_;T`Q=YHDBQ?Gcjsja zl?U{=>4{zO87uiCsVmk-*(%mzF<8x^s~)FX zFLdpkxB`5ST&JWIoS4hfDj!IvZPHxt@EKps(&MjVGPtX1I#N2>ptB!i<`=r7j^&Kh zBl1u#$$FF?XopP6z>uHUs!Y-`3~qT#4r@M;-3bH)zrB(`U4dw6nT=v!skJe|1+c9J42j{q!hzh_{fAMB7XY;+n9%lPIXf+y@RUB2MFG@=G8-KtV- 
zMWuzy2hA&)JWYj{UoAlgTL88g}lTgB=_4gD? zLp^4}gf5Rd=E8a@%n9gb9=apSxiMERjKkyg@SWP>a_ggM{~r)vvGwr2vJ{(Y@$pJv z;Qo4QXhmL2Cj`&inD;o78*35qhHuO_bUd8CjiFdE6qdoRK6b;-Zwqgd%~H)1Q>;-( zgWcb5BH5hq{@qu@tsM#NC?T&Li))r6rc!`V_n>QpJyZ~?Dt#2Wq4zoioyJc!^<8vC z7Y=N0$nL^0V}0U2iN)rW22=4iNms)2B=b!J2s|aQF=`E9-^jF57r|Jugguaxne z;fH#xxodnjOZYF(t;IeeF-1h^ctAk}yq1prAn;cM&%NpUQ*$LXcT}m7xVyspedZr$pG8^n(y0=T1(GZx$Vs(VG0H8=YQtMz!cnWd#<9B3=t%q_4fe0ZnV?-ze zYE{I_`C`)9J6#f)x5-JC#I2kzH)@#yx$?C_F72)6vDMu z3mf#eu_GSlP`a5xu7g}jI=#|LKJ@F0+p2|kBp!{ZlYM}|Yuutdi1b<-KPG?I^RrF~ zt=G3C&`8o<^66OZM$`7}&tD3x1ZX~Ydl|5l-C6Ch;N-7aUmi3of7@^tSi0#6%rm?W zvcGS>=uA()%OsgV+P^VO2e?&%ZQ_rXgZCbsW!xi@ENC~!S;m(fR0Au6f7Fi6a)