From 25a83468806531cfa5a81734583cd374d4d1eb01 Mon Sep 17 00:00:00 2001
From: xperzy
Date: Wed, 26 Jan 2022 18:25:29 +0800
Subject: [PATCH 01/12] update MAE using new impl

---
 image_classification/MAE/config.py | 54 +-
 .../vit_base_patch16_224_finetune.yaml | 26 +-
 .../vit_base_patch16_224_linearprobe.yaml | 44 +
 .../vit_base_patch16_224_pretrain.yaml | 14 +-
 .../vit_base_patch16_224_pretrain_dec1.yaml | 7 +-
 .../vit_huge_patch14_224_finetune.yaml | 44 +
 .../vit_huge_patch14_224_linearprobe.yaml | 44 +
 .../vit_huge_patch14_224_pretrain.yaml | 32 +
 .../vit_large_patch16_224_finetune.yaml | 30 +-
 .../vit_large_patch16_224_linearprobe.yaml | 44 +
 .../vit_large_patch16_224_pretrain.yaml | 22 +-
 image_classification/MAE/datasets.py | 109 +-
 image_classification/MAE/losses.py | 2 +
 image_classification/MAE/lr_decay.py | 66 +
 .../MAE/main_multi_gpu_finetune.py | 514 +-
 .../MAE/main_multi_gpu_linearprobe.py | 562 +
 .../MAE/main_multi_gpu_pretrain.py | 289 +-
 .../MAE/main_single_gpu_finetune.py | 403 -
 .../MAE/main_single_gpu_pretrain.py | 308 -
 image_classification/MAE/masking_generator.py | 50 -
 image_classification/MAE/nohup.out | 9507 -----------------
 image_classification/MAE/random_erasing.py | 118 +
 image_classification/MAE/run_finetune.sh | 8 -
 .../MAE/run_finetune_multi.sh | 3 +-
 .../MAE/run_linear_probe_multi.sh | 8 +
 image_classification/MAE/run_pretrain.sh | 8 -
 .../MAE/run_pretrain_multi.sh | 9 +-
 .../MAE/run_pretrain_multi_resume.sh | 10 -
 image_classification/MAE/stat_define.py | 61 -
 image_classification/MAE/tests/__init__.py | 1 -
 image_classification/MAE/tests/test_config.py | 72 -
 .../MAE/tests/test_config.yaml | 14 -
 .../MAE/tests/test_datasets.py | 147 -
 .../MAE/tests/test_transformer.py | 115 -
 image_classification/MAE/tests/test_utils.py | 90 -
 image_classification/MAE/transformer.py | 474 +-
 image_classification/MAE/utils.py | 87 +
 37 files changed, 1850 insertions(+), 11546 deletions(-)
 create mode 100644 image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml
 create mode 100644 image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml
 create mode 100644 image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml
 create mode 100644 image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml
 create mode 100644 image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml
 create mode 100644 image_classification/MAE/lr_decay.py
 create mode 100644 image_classification/MAE/main_multi_gpu_linearprobe.py
 delete mode 100644 image_classification/MAE/main_single_gpu_finetune.py
 delete mode 100644 image_classification/MAE/main_single_gpu_pretrain.py
 delete mode 100644 image_classification/MAE/masking_generator.py
 delete mode 100644 image_classification/MAE/nohup.out
 create mode 100644 image_classification/MAE/random_erasing.py
 delete mode 100644 image_classification/MAE/run_finetune.sh
 create mode 100644 image_classification/MAE/run_linear_probe_multi.sh
 delete mode 100644 image_classification/MAE/run_pretrain.sh
 delete mode 100644 image_classification/MAE/run_pretrain_multi_resume.sh
 delete mode 100644 image_classification/MAE/stat_define.py
 delete mode 100644 image_classification/MAE/tests/__init__.py
 delete mode 100644 image_classification/MAE/tests/test_config.py
 delete mode 100644 image_classification/MAE/tests/test_config.yaml
 delete mode 100644 image_classification/MAE/tests/test_datasets.py
 delete mode 100644 image_classification/MAE/tests/test_transformer.py
 delete mode 100644 image_classification/MAE/tests/test_utils.py
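Note on the new lr_decay.py (added below): it implements the BEiT/MAE-style layer-wise learning-rate decay, where each parameter group's learning rate is scaled by layer_decay ** (num_layers - layer_id), so the patch/position embeddings (layer 0) and early encoder blocks are updated far more gently than the classifier head. A minimal sketch of that scaling rule follows; the helper name layer_scales and its defaults are illustrative only, not part of the patch (the finetune configs set LAYER_DECAY: 0.65 for ViT-Base/Large and 0.75 for ViT-Huge):

    # Sketch of the layer-wise LR decay rule used by param_groups_lrd() in lr_decay.py.
    # layer_id 0 -> cls_token / position / patch embeddings, 1..N -> encoder blocks,
    # N+1 -> everything else (final norm, classifier head), which keeps the full base LR.
    def layer_scales(num_encoder_blocks=12, layer_decay=0.65):
        num_layers = num_encoder_blocks + 1  # matches len(model.encoder.layers) + 1
        return [layer_decay ** (num_layers - i) for i in range(num_layers + 1)]

    if __name__ == "__main__":
        for layer_id, scale in enumerate(layer_scales()):
            print(f"layer {layer_id:2d}: lr scale {scale:.4f}")

With layer_decay=0.65 and 12 encoder blocks, the embeddings train at roughly 0.4% of the head's learning rate, which keeps the low-level pretrained features largely intact during finetuning.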
diff --git a/image_classification/MAE/config.py b/image_classification/MAE/config.py index 7a2cf65b..c066d9d5 100644 --- a/image_classification/MAE/config.py +++ b/image_classification/MAE/config.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,15 +41,15 @@ # model settings _C.MODEL = CN() -_C.MODEL.TYPE = 'MAE' +_C.MODEL.TYPE = 'PRETRAIN' # [PRETRAIN, FINETUNE, LINEARPROBE] _C.MODEL.NAME = 'MAE' _C.MODEL.RESUME = None _C.MODEL.PRETRAINED = None _C.MODEL.NUM_CLASSES = 1000 -_C.MODEL.DROPOUT = 0.0 -_C.MODEL.DROPPATH = 0.0 -_C.MODEL.ATTENTION_DROPOUT = 0.0 -_C.MODEL.MAE_PRETRAIN = True +_C.MODEL.DROPOUT = 0.1 +_C.MODEL.DROPPATH = 0.1 +_C.MODEL.ATTENTION_DROPOUT = 0.1 +_C.MODEL.GLOBAL_POOL = False # use for finetune only # transformer settings _C.MODEL.TRANS = CN() @@ -57,6 +57,7 @@ _C.MODEL.TRANS.MLP_RATIO = 4.0 _C.MODEL.TRANS.QKV_BIAS = True _C.MODEL.TRANS.MASK_RATIO = 0.75 +_C.MODEL.TRANS.NORM_PIX_LOSS = True _C.MODEL.TRANS.ENCODER = CN() _C.MODEL.TRANS.ENCODER.DEPTH = 12 _C.MODEL.TRANS.ENCODER.EMBED_DIM = 768 @@ -71,27 +72,35 @@ _C.TRAIN = CN() _C.TRAIN.LAST_EPOCH = 0 _C.TRAIN.NUM_EPOCHS = 800 -_C.TRAIN.WARMUP_EPOCHS = 40 -_C.TRAIN.WEIGHT_DECAY = 0.05 -_C.TRAIN.BASE_LR = 1.5e-4 +_C.TRAIN.WARMUP_EPOCHS = 40 # 34 # ~ 10k steps for 4096 batch size +_C.TRAIN.WEIGHT_DECAY = 0.05 # 0.3 # 0.0 for finetune +_C.TRAIN.BASE_LR = 1.5e-4 # 0.003 for pretrain # 0.03 for finetune _C.TRAIN.WARMUP_START_LR = 1e-6 # 0.0 -_C.TRAIN.END_LR = 0.0 +_C.TRAIN.END_LR = 5e-4 _C.TRAIN.GRAD_CLIP = None -_C.TRAIN.ACCUM_ITER = 1 -_C.TRAIN.LINEAR_SCALED_LR = 256 -_C.TRAIN.NORMALIZE_TARGET = True +_C.TRAIN.ACCUM_ITER = 2 # 1 +_C.TRAIN.LINEAR_SCALED_LR = None +_C.TRAIN.LAYER_DECAY = None # used for finetuning only # train augmentation (only for finetune) _C.TRAIN.SMOOTHING = 0.1 -_C.TRAIN.RAND_AUGMENT = False +_C.TRAIN.COLOR_JITTER = 0.4 +_C.TRAIN.RAND_AUGMENT = True _C.TRAIN.RAND_AUGMENT_LAYERS = 9 _C.TRAIN.RAND_AUGMENT_MAGNITUDE = 5 # scale from 0 to 10 -_C.TRAIN.MIXUP_ALPHA = 0.8 +# mixup params +_C.TRAIN.MIXUP_ALPHA = 0.0 _C.TRAIN.MIXUP_PROB = 1.0 _C.TRAIN.MIXUP_SWITCH_PROB = 0.5 _C.TRAIN.MIXUP_MODE = 'batch' -_C.TRAIN.CUTMIX_ALPHA = 1.0 +_C.TRAIN.CUTMIX_ALPHA = 0.0 _C.TRAIN.CUTMIX_MINMAX = None +# random erase parameters +_C.TRAIN.RANDOM_ERASE_PROB = 0.25 +_C.TRAIN.RANDOM_ERASE_MODE = 'pixel' +_C.TRAIN.RANDOM_ERASE_COUNT = 1 +_C.TRAIN.RANDOM_ERASE_SPLIT = False + _C.TRAIN.LR_SCHEDULER = CN() _C.TRAIN.LR_SCHEDULER.NAME = 'warmupcosine' @@ -102,7 +111,7 @@ _C.TRAIN.OPTIMIZER = CN() _C.TRAIN.OPTIMIZER.NAME = 'AdamW' _C.TRAIN.OPTIMIZER.EPS = 1e-8 -_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.95) # same as MAE paper, for adamW +_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.95) # for adamW same as pytorch MAE _C.TRAIN.OPTIMIZER.MOMENTUM = 0.9 @@ -145,24 +154,19 @@ def update_config(config, args): config.defrost() if args.dataset: config.DATA.DATASET = args.dataset - if args.eval: - config.EVAL = True if args.batch_size: config.DATA.BATCH_SIZE = args.batch_size - if config.EVAL: - config.DATA.BATCH_SIZE_EVAL = args.batch_size if args.image_size: config.DATA.IMAGE_SIZE = args.image_size if args.data_path: config.DATA.DATA_PATH = args.data_path - if args.output is not None: - config.SAVE = args.output if args.ngpus: config.NGPUS = args.ngpus + if args.eval: + config.EVAL = True + config.DATA.BATCH_SIZE_EVAL = 
args.batch_size if args.pretrained: config.MODEL.PRETRAINED = args.pretrained - if args.mae_pretrain: - config.MODEL.MAE_PRETRAIN = args.mae_pretrain if args.resume: config.MODEL.RESUME = args.resume if args.last_epoch: diff --git a/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml b/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml index 9cee1446..eb666192 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml @@ -2,34 +2,31 @@ DATA: IMAGE_SIZE: 224 CROP_PCT: 0.875 MODEL: - TYPE: MAE + TYPE: FINETUNE NAME: vit_base_patch16_224 DROPPATH: 0.1 + GLOBAL_POOL: True TRANS: PATCH_SIZE: 16 MLP_RATIO: 4.0 QKV_BIAS: true - MASK_RATIO: 0.75 ENCODER: EMBED_DIM: 768 DEPTH: 12 NUM_HEADS: 12 - TRAIN: - NUM_EPOCHS: 100 + NUM_EPOCHS: 50 WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 BASE_LR: 1e-3 - WARMUP_START_LR: 1e-6 - ACCUM_ITER: 2 # the total batch size should be 1024 - - LR_SCHEDULER: - NAME: 'warmupcosine' - + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 + END_LR: 1e-6 + ACCUM_ITER: 1 OPTIMIZER: NAME: 'AdamW' BETAS: (0.9, 0.999) - + LAYER_DECAY: 0.65 SMOOTHING: 0.1 RAND_AUGMENT: True RAND_AUGMENT_LAYERS: 9 @@ -39,4 +36,9 @@ TRAIN: MIXUP_SWITCH_PROB: 0.5 MIXUP_MODE: 'batch' CUTMIX_ALPHA: 1.0 - CUTMIX_MINMAX: None \ No newline at end of file + CUTMIX_MINMAX: None + RANDOM_ERASE_PROB: 0.25 + RANDOM_ERASE_MODE: 'pixel' + RANDOM_ERASE_COUNT: 1 + RANDOM_ERASE_SPLIT: False + diff --git a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml b/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml new file mode 100644 index 00000000..4a3d039d --- /dev/null +++ b/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml @@ -0,0 +1,44 @@ +DATA: + IMAGE_SIZE: 224 + CROP_PCT: 0.875 +MODEL: + TYPE: LINEARPROBE + NAME: vit_base_patch16_224 + DROPPATH: 0.1 + GLOBAL_POOL: False + TRANS: + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 +TRAIN: + NUM_EPOCHS: 90 + WARMUP_EPOCHS: 10 + WEIGHT_DECAY: 0.0 + BASE_LR: 0.1 + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 + END_LR: 0.0 + ACCUM_ITER: 1 + OPTIMIZER: + NAME: 'AdamW' + BETAS: (0.9, 0.999) + #LAYER_DECAY: 0.75 + #SMOOTHING: 0.1 + #RAND_AUGMENT: True + #RAND_AUGMENT_LAYERS: 9 + #RAND_AUGMENT_MAGNITUDE: 5 + #MIXUP_ALPHA: 0.0 + #MIXUP_PROB: 1.0 + #MIXUP_SWITCH_PROB: 0.5 + #MIXUP_MODE: 'batch' + #CUTMIX_ALPHA: 0.0 + #CUTMIX_MINMAX: None + #RANDOM_ERASE_PROB: 0.25 + #RANDOM_ERASE_MODE: 'pixel' + #RANDOM_ERASE_COUNT: 1 + #RANDOM_ERASE_SPLIT: False + diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml b/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml index 5eb52f39..e43573dc 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml @@ -2,10 +2,9 @@ DATA: IMAGE_SIZE: 224 CROP_PCT: 0.875 MODEL: - TYPE: MAE + TYPE: PRETRAIN NAME: vit_base_patch16_224 DROPPATH: 0.0 - MAE_PRETRAIN: True TRANS: PATCH_SIZE: 16 MLP_RATIO: 4.0 @@ -18,19 +17,16 @@ MODEL: DECODER: EMBED_DIM: 512 DEPTH: 8 - NUM_HEADS: 8 + NUM_HEADS: 16 TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 - WARMUP_START_LR: 1e-6 + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 GRAD_CLIP: None - ACCUM_ITER: 2 # the total batch size should be 4096 - - LR_SCHEDULER: - NAME: 'warmupcosine' - + ACCUM_ITER: 1 OPTIMIZER: NAME: 'AdamW' 
BETAS: (0.9, 0.95) diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml b/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml index c4284444..43f1fa73 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml @@ -2,10 +2,9 @@ DATA: IMAGE_SIZE: 224 CROP_PCT: 0.875 MODEL: - TYPE: MAE + TYPE: PRETRAIN NAME: vit_base_patch16_224_dec1 DROPPATH: 0.0 - MAE_PRETRAIN: True TRANS: PATCH_SIZE: 16 MLP_RATIO: 4.0 @@ -25,8 +24,8 @@ TRAIN: WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 WARMUP_START_LR: 1e-6 - GRAD_CLIP: None - ACCUM_ITER: 2 # 8gpus only have 2048 batch size, the total batch size should be 4096 + GRAD_CLIP: 1 + ACCUM_ITER: 1 # the total batch size should be 4096 LINEAR_SCALED_LR: None LR_SCHEDULER: diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml new file mode 100644 index 00000000..0c15171b --- /dev/null +++ b/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml @@ -0,0 +1,44 @@ +DATA: + IMAGE_SIZE: 224 + CROP_PCT: 0.875 +MODEL: + TYPE: FINETUNE + NAME: vit_huge_patch14_224 + DROPPATH: 0.3 + GLOBAL_POOL: True + TRANS: + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 1280 + DEPTH: 32 + NUM_HEADS: 16 +TRAIN: + NUM_EPOCHS: 50 + WARMUP_EPOCHS: 5 + WEIGHT_DECAY: 0.05 + BASE_LR: 1e-3 + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 + END_LR: 1e-6 + ACCUM_ITER: 1 + OPTIMIZER: + NAME: 'AdamW' + BETAS: (0.9, 0.999) + LAYER_DECAY: 0.75 + SMOOTHING: 0.1 + RAND_AUGMENT: True + RAND_AUGMENT_LAYERS: 9 + RAND_AUGMENT_MAGNITUDE: 5 + MIXUP_ALPHA: 0.8 + MIXUP_PROB: 1.0 + MIXUP_SWITCH_PROB: 0.5 + MIXUP_MODE: 'batch' + CUTMIX_ALPHA: 1.0 + CUTMIX_MINMAX: None + RANDOM_ERASE_PROB: 0.25 + RANDOM_ERASE_MODE: 'pixel' + RANDOM_ERASE_COUNT: 1 + RANDOM_ERASE_SPLIT: False + diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml new file mode 100644 index 00000000..e753155f --- /dev/null +++ b/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml @@ -0,0 +1,44 @@ +DATA: + IMAGE_SIZE: 224 + CROP_PCT: 0.875 +MODEL: + TYPE: LINEARPROBE + NAME: vit_huge_patch14_224 + DROPPATH: 0.1 + GLOBAL_POOL: False + TRANS: + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 1280 + DEPTH: 32 + NUM_HEADS: 16 +TRAIN: + NUM_EPOCHS: 90 + WARMUP_EPOCHS: 10 + WEIGHT_DECAY: 0.0 + BASE_LR: 0.1 + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 + END_LR: 0.0 + ACCUM_ITER: 1 + OPTIMIZER: + NAME: 'AdamW' + BETAS: (0.9, 0.999) + #LAYER_DECAY: 0.75 + #SMOOTHING: 0.1 + #RAND_AUGMENT: True + #RAND_AUGMENT_LAYERS: 9 + #RAND_AUGMENT_MAGNITUDE: 5 + #MIXUP_ALPHA: 0.8 + #MIXUP_PROB: 1.0 + #MIXUP_SWITCH_PROB: 0.5 + #MIXUP_MODE: 'batch' + #CUTMIX_ALPHA: 1.0 + #CUTMIX_MINMAX: None + #RANDOM_ERASE_PROB: 0.25 + #RANDOM_ERASE_MODE: 'pixel' + #RANDOM_ERASE_COUNT: 1 + #RANDOM_ERASE_SPLIT: False + diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml new file mode 100644 index 00000000..ccb6bfef --- /dev/null +++ b/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml @@ -0,0 +1,32 @@ +DATA: + IMAGE_SIZE: 224 + CROP_PCT: 0.875 +MODEL: + TYPE: PRETRAIN + NAME: vit_huge_patch14_224 + DROPPATH: 0.0 + TRANS: + PATCH_SIZE: 14 + MLP_RATIO: 
4.0 + QKV_BIAS: true + MASK_RATIO: 0.75 + ENCODER: + EMBED_DIM: 1280 + DEPTH: 32 + NUM_HEADS: 16 + DECODER: + EMBED_DIM: 512 + DEPTH: 8 + NUM_HEADS: 16 +TRAIN: + NUM_EPOCHS: 800 + WARMUP_EPOCHS: 40 + WEIGHT_DECAY: 0.05 + BASE_LR: 1.5e-4 + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 + GRAD_CLIP: None + ACCUM_ITER: 1 + OPTIMIZER: + NAME: 'AdamW' + BETAS: (0.9, 0.95) diff --git a/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml b/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml index 11136830..050ec685 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml +++ b/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml @@ -2,34 +2,31 @@ DATA: IMAGE_SIZE: 224 CROP_PCT: 0.875 MODEL: - TYPE: MAE + TYPE: FINETUNE NAME: vit_large_patch16_224 DROPPATH: 0.1 + GLOBAL_POOL: True TRANS: PATCH_SIZE: 16 MLP_RATIO: 4.0 QKV_BIAS: true - MASK_RATIO: 0.75 ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 - + EMBED_DIM: 1024 + DEPTH: 24 + NUM_HEADS: 16 TRAIN: NUM_EPOCHS: 50 WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 BASE_LR: 1e-3 - WARMUP_START_LR: 1e-6 - ACCUM_ITER: 2 # the total batch size should be 1024 - - LR_SCHEDULER: - NAME: 'warmupcosine' - + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 + END_LR: 1e-6 + ACCUM_ITER: 1 OPTIMIZER: NAME: 'AdamW' BETAS: (0.9, 0.999) - + LAYER_DECAY: 0.65 SMOOTHING: 0.1 RAND_AUGMENT: True RAND_AUGMENT_LAYERS: 9 @@ -39,4 +36,9 @@ TRAIN: MIXUP_SWITCH_PROB: 0.5 MIXUP_MODE: 'batch' CUTMIX_ALPHA: 1.0 - CUTMIX_MINMAX: None \ No newline at end of file + CUTMIX_MINMAX: None + RANDOM_ERASE_PROB: 0.25 + RANDOM_ERASE_MODE: 'pixel' + RANDOM_ERASE_COUNT: 1 + RANDOM_ERASE_SPLIT: False + diff --git a/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml b/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml new file mode 100644 index 00000000..e91bc21d --- /dev/null +++ b/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml @@ -0,0 +1,44 @@ +DATA: + IMAGE_SIZE: 224 + CROP_PCT: 0.875 +MODEL: + TYPE: LINEARPROBE + NAME: vit_large_patch16_224 + DROPPATH: 0.1 + GLOBAL_POOL: False + TRANS: + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 1024 + DEPTH: 24 + NUM_HEADS: 16 +TRAIN: + NUM_EPOCHS: 90 + WARMUP_EPOCHS: 10 + WEIGHT_DECAY: 0.0 + BASE_LR: 0.1 + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 + END_LR: 0.0 + ACCUM_ITER: 1 + OPTIMIZER: + NAME: 'AdamW' + BETAS: (0.9, 0.999) + #LAYER_DECAY: 0.75 + #SMOOTHING: 0.1 + #RAND_AUGMENT: True + #RAND_AUGMENT_LAYERS: 9 + #RAND_AUGMENT_MAGNITUDE: 5 + #MIXUP_ALPHA: 0.8 + #MIXUP_PROB: 1.0 + #MIXUP_SWITCH_PROB: 0.5 + #MIXUP_MODE: 'batch' + #CUTMIX_ALPHA: 1.0 + #CUTMIX_MINMAX: None + #RANDOM_ERASE_PROB: 0.25 + #RANDOM_ERASE_MODE: 'pixel' + #RANDOM_ERASE_COUNT: 1 + #RANDOM_ERASE_SPLIT: False + diff --git a/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml b/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml index 04b5e086..15eec2a1 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml +++ b/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml @@ -2,35 +2,31 @@ DATA: IMAGE_SIZE: 224 CROP_PCT: 0.875 MODEL: - TYPE: MAE + TYPE: PRETRAIN NAME: vit_large_patch16_224 DROPPATH: 0.0 - MAE_PRETRAIN: True TRANS: PATCH_SIZE: 16 MLP_RATIO: 4.0 QKV_BIAS: true MASK_RATIO: 0.75 ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 + EMBED_DIM: 1024 + DEPTH: 24 + NUM_HEADS: 16 DECODER: EMBED_DIM: 512 DEPTH: 8 - NUM_HEADS: 8 + NUM_HEADS: 16 
TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 - WARMUP_START_LR: 1e-6 + WARMUP_START_LR: 0.0 + LINEAR_SCALED_LR: 256 GRAD_CLIP: None - ACCUM_ITER: 2 # the total batch size should be 4096 - - LR_SCHEDULER: - NAME: 'warmupcosine' - + ACCUM_ITER: 1 OPTIMIZER: NAME: 'AdamW' - BETAS: (0.9, 0.95) \ No newline at end of file + BETAS: (0.9, 0.95) diff --git a/image_classification/MAE/datasets.py b/image_classification/MAE/datasets.py index 1d6c17d3..c90330d8 100644 --- a/image_classification/MAE/datasets.py +++ b/image_classification/MAE/datasets.py @@ -30,10 +30,9 @@ from augment import AutoAugment from augment import rand_augment_policy_original from augment import RandAugment -from masking_generator import RandomMaskingGenerator -from transforms import RandomHorizontalFlip from random_erasing import RandomErasing + class ImageNet2012Dataset(Dataset): """Build ImageNet2012 dataset @@ -51,15 +50,7 @@ def __init__(self, file_folder, mode="train", transform=None): super(ImageNet2012Dataset, self).__init__() assert mode in ["train", "val"] self.file_folder = file_folder - - if isinstance(transform, tuple): - # training: transform = [transform, mask_generator] - self.transform = transform[0] - self.mask_generator = transform[1] # if mae finetune, mask_generator is None - else: - # val: transform = transform - self.transform = transform - self.mask_generator = None + self.transform = transform self.img_path_list = [] self.label_list = [] @@ -82,46 +73,53 @@ def __len__(self): def __getitem__(self, index): data = image_load(self.img_path_list[index]).convert('RGB') data = self.transform(data) - if self.mask_generator is not None: - mask = self.mask_generator() - else: - mask = None + label = self.label_list[index] - if mask is None: - label = self.label_list[index] - return data, label + return data, label - return data, mask +def get_train_transforms_pretrain(config): + """Simple augmentation for pretraining""" + aug_op_list = [transforms.RandomResizedCrop(size=(config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE), + scale=(0.2, 1.0), + interpolation='bicubic'), # same as MAE pytorch + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=config.DATA.IMAGENET_MEAN, std=config.DATA.IMAGENET_STD)] + transforms_train = transforms.Compose(aug_op_list) + return transforms_train -def get_train_transforms(config): - """ Get training transforms - For training, a RandomResizedCrop is applied, then normalization is applied with - [0.5, 0.5, 0.5] mean and std. The input pixel values must be rescaled to [0, 1.] 
- Outputs is converted to tensor +def get_train_transforms_linearprobe(config): + """Weak augmentation for linear probing""" + aug_op_list = [transforms.RandomResizedCrop(size=(config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE), + interpolation='bicubic'), # same as MAE pytorch + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=config.DATA.IMAGENET_MEAN, std=config.DATA.IMAGENET_STD)] + transforms_train = transforms.Compose(aug_op_list) + return transforms_train - Args: - config: configs contains IMAGE_SIZE, see config.py for details - Returns: - transforms_train: training transforms - """ +def get_train_transforms_finetune(config): + """Full augmentation for finetuning""" aug_op_list = [] # STEP1: random crop and resize aug_op_list.append( transforms.RandomResizedCrop((config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE), - scale=(0.05, 1.0), interpolation='bicubic')) - # STEP2: auto_augment or color jitter - if config.TRAIN.AUTO_AUGMENT: - policy = auto_augment_policy_original() - auto_augment = AutoAugment(policy) - aug_op_list.append(auto_augment) - elif config.TRAIN.RAND_AUGMENT: + scale=(0.2, 1.0), interpolation='bicubic'))# Same as MAE pytorch + # STEP2: random horizontalflip + aug_op_list.append(transforms.RandomHorizontalFlip()) + # STEP3: rand_augment or auto_augment or color jitter + if config.TRAIN.RAND_AUGMENT: # MAE: True policy = rand_augment_policy_original() rand_augment = RandAugment(policy) aug_op_list.append(rand_augment) - else: + elif config.TRAIN.AUTO_AUGMENT: # MAE: None + policy = auto_augment_policy_original() + auto_augment = AutoAugment(policy) + aug_op_list.append(auto_augment) + else: # MAE: None jitter = (float(config.TRAIN.COLOR_JITTER), ) * 3 aug_op_list.append(transforms.ColorJitter(*jitter)) # STEP3: other ops @@ -138,17 +136,35 @@ def get_train_transforms(config): # Final: compose transforms and return transforms_train = transforms.Compose(aug_op_list) - if config.MODEL.MAE_PRETRAIN: - # for MAE pretraining - mask_generator = RandomMaskingGenerator( - input_size=config.DATA.IMAGE_SIZE // config.MODEL.TRANS.PATCH_SIZE, - mask_ratio=config.MODEL.TRANS.MASK_RATIO) - else: - mask_generator = None + return transforms_train + + +def get_train_transforms(config): + """ Get training transforms - return (transforms_train, mask_generator) + For training, a RandomResizedCrop is applied, then normalization is applied with + mean and std. The input pixel values must be rescaled to [0, 1.] 
+ Outputs is converted to tensor + + Args: + config: configs contains IMAGE_SIZE, see config.py for details + Returns: + transforms_train: training transforms + """ + assert config.MODEL.TYPE in ["PRETRAIN", "FINETUNE", "LINEARPROBE"] + if config.MODEL.TYPE == "PRETRAIN": + transforms_train = get_train_transforms_pretrain + elif config.MODEL.TYPE == "FINETUNE": + transforms_train = get_train_transforms_finetune + elif config.MODEL.TYPE == "LINEARPROBE": + transforms_train = get_train_transforms_linearprobe + else: + raise ValueError('config.MODEL.TYPE not supported!') + + return transforms_train(config) +# val transform is for MAE finetune and line probing def get_val_transforms(config): """ Get training transforms @@ -168,8 +184,7 @@ def get_val_transforms(config): transforms.Resize(scale_size, 'bicubic'), # single int for resize shorter side of image transforms.CenterCrop((config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE)), transforms.ToTensor(), - transforms.Normalize(mean=config.DATA.IMAGENET_MEAN, std=config.DATA.IMAGENET_STD), - ]) + transforms.Normalize(mean=config.DATA.IMAGENET_MEAN, std=config.DATA.IMAGENET_STD)]) return transforms_val diff --git a/image_classification/MAE/losses.py b/image_classification/MAE/losses.py index f67780a2..082467a3 100644 --- a/image_classification/MAE/losses.py +++ b/image_classification/MAE/losses.py @@ -119,3 +119,5 @@ def forward(self, inputs, outputs, targets): loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha return loss + + diff --git a/image_classification/MAE/lr_decay.py b/image_classification/MAE/lr_decay.py new file mode 100644 index 00000000..482eca45 --- /dev/null +++ b/image_classification/MAE/lr_decay.py @@ -0,0 +1,66 @@ +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""parameters groups for layer-wise lr decay, used in BeiT and MAE""" + +def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=[], layer_decay=0.75): + param_group_names = {} + param_groups = {} + num_layers = len(model.encoder.layers) + 1 + layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1)) + + for n, p in model.named_parameters(): + if p.stop_gradient is True: + continue + + # no decay + if p.ndim == 1 or n in no_weight_decay_list: + g_decay = 'no_decay' + this_decay = 0. 
+ else: + g_decay = 'decay' + this_decay = weight_decay + + layer_id = get_layer_id_for_vit(n, num_layers) + group_name = f"layer_{layer_id}_{g_decay}" + + if group_name not in param_group_names: + this_scale = layer_scales[layer_id] + param_group_names[group_name] = { + "learning_rate": this_scale, # TODO: check correctness + "weight_decay": this_decay, + "params": [], + } + param_groups[group_name] = { + "learning_rate": this_scale, + "weight_decay": this_decay, + "params": [], + } + + param_group_names[group_name]["params"].append(n) + param_groups[group_name]["params"].append(p) + return list(param_groups.values()) + + +def get_layer_id_for_vit(name, num_layers): + """assign a parameter with its layer id""" + if name in ['cls_token', 'position_embedding']: + return 0 + elif name.startswith('patch_embedding'): + return 0 + elif name.startswith('encoder.layers'): + return int(name.split('.')[2]) + 1 + else: + return num_layers + diff --git a/image_classification/MAE/main_multi_gpu_finetune.py b/image_classification/MAE/main_multi_gpu_finetune.py index a6ace004..2eab37fd 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/image_classification/MAE/main_multi_gpu_finetune.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""MAE finetuning/validation using multiple GPU """ +"""MAE finetuning using multiple GPU """ import sys import os @@ -27,20 +27,24 @@ import paddle.distributed as dist from datasets import get_dataloader from datasets import get_dataset -from transformer import build_mae_finetune as build_model +from mixup import Mixup from losses import LabelSmoothingCrossEntropyLoss from losses import SoftTargetCrossEntropyLoss +from transformer import build_transformer as build_model from utils import AverageMeter from utils import WarmupCosineScheduler from utils import get_exclude_from_weight_decay_fn +from utils import get_params_groups +from utils import cosine_scheduler +from utils import interpolate_pos_embed +import lr_decay from config import get_config from config import update_config -from mixup import Mixup def get_arguments(): """return argumeents, this will overwrite the config after loading yaml file""" - parser = argparse.ArgumentParser('ViT') + parser = argparse.ArgumentParser('MAE') parser.add_argument('-cfg', type=str, default=None) parser.add_argument('-dataset', type=str, default=None) parser.add_argument('-batch_size', type=int, default=None) @@ -52,7 +56,6 @@ def get_arguments(): parser.add_argument('-resume', type=str, default=None) parser.add_argument('-last_epoch', type=int, default=None) parser.add_argument('-eval', action='store_true') - parser.add_argument('-mae_pretrain', action='store_true') parser.add_argument('-amp', action='store_true') arguments = parser.parse_args() return arguments @@ -77,10 +80,30 @@ def get_logger(filename, logger_name=None): return logger +def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): + if local_logger: + if level == 'info': + local_logger.info(msg_local) + elif level == 'fatal': + local_logger.fatal(msg_local) + else: + raise ValueError("level must in ['info', 'fatal']") + if master_logger and dist.get_rank() == 0: + if msg_master is 
None: + msg_master = msg_local + if level == 'info': + master_logger.info("MASTER_LOG " + msg_master) + elif level == 'fatal': + master_logger.fatal("MASTER_LOG " + msg_master) + else: + raise ValueError("level must in ['info', 'fatal']") + + def train(dataloader, model, - criterion, optimizer, + criterion, + lr_schedule, epoch, total_epochs, total_batch, @@ -94,7 +117,9 @@ def train(dataloader, Args: dataloader: paddle.io.DataLoader, dataloader instance model: nn.Layer, a ViT model - criterion: nn.criterion + optimizer: nn.optimizer + criterion: nn.XXLoss + lr_schedule: list of float, lr schdeule epoch: int, current epoch total_epochs: int, total num of epochs total_batch: int, total num of batches for one epoch @@ -106,50 +131,52 @@ def train(dataloader, master_logger: logger for main process, default: None Returns: train_loss_meter.avg: float, average loss on current process/gpu - train_acc_meter.avg: float, average top1 accuracy on current process/gpu - master_train_loss_meter.avg: float, average loss on all processes/gpus - master_train_acc_meter.avg: float, average top1 accuracy on all processes/gpus + train_acc_meter.avg: float, average acc@1 on current process/gpu + master_loss_meter.avg: float, average loss on all processes/gpus + master_acc_meter.avg: float, average acc@1 on all processes/gpus train_time: float, training time """ model.train() train_loss_meter = AverageMeter() train_acc_meter = AverageMeter() - master_train_loss_meter = AverageMeter() - master_train_acc_meter = AverageMeter() + master_loss_meter = AverageMeter() + master_acc_meter = AverageMeter() if amp is True: - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) + scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 time_st = time.time() for batch_id, data in enumerate(dataloader): - image = data[0] + # get data + images = data[0] label = data[1] label_orig = label.clone() if mixup_fn is not None: - image, label = mixup_fn(image, label_orig) - - if amp is True: # mixed precision training - with paddle.amp.auto_cast(): - output = model(image) - loss = criterion(output, label) - scaled = scaler.scale(loss) - scaled.backward() - if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - scaler.minimize(optimizer, scaled) - optimizer.clear_grad() - else: # full precision training - output = model(image) + images, label = mixup_fn(images, label_orig) + + # set per iteration lr using scheduler + global_train_iter = total_batch * (epoch - 1) + batch_id # epoch starts from 1 + optimizer.set_lr(lr_schedule[global_train_iter]) + # forward + with paddle.amp.auto_cast(amp is True): + output = model(images) loss = criterion(output, label) - # NOTE: division may be needed depending on the loss function - # Here no division is needed: - # default 'reduction' param in nn.CrossEntropyLoss is set to 'mean' - # loss = loss / accum_iter - loss.backward() + if not amp: # fp32 + loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): optimizer.step() optimizer.clear_grad() + else: + scaled = scaler.scale(loss) + scaled.backward() + # TODO: check if manually unscale and clip grad is required here + if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): + # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 + scaler.step(optimizer) + scaler.update() + optimizer.clear_grad() pred = F.softmax(output) if mixup_fn: @@ -157,144 +184,139 @@ def train(dataloader, else: acc = paddle.metric.accuracy(pred, 
label_orig.unsqueeze(1)) - batch_size = paddle.to_tensor(image.shape[0]) - # sync from other gpus for overall loss and acc - master_loss = loss.clone() - master_acc = acc.clone() - master_batch_size = batch_size.clone() + batch_size = paddle.to_tensor(images.shape[0]) + master_loss = paddle.to_tensor(loss.numpy()) + master_acc = paddle.to_tensor(acc.numpy()) + master_batch_size = paddle.to_tensor(batch_size.numpy()) dist.all_reduce(master_loss) dist.all_reduce(master_acc) dist.all_reduce(master_batch_size) master_loss = master_loss / dist.get_world_size() master_acc = master_acc / dist.get_world_size() - master_train_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) - master_train_acc_meter.update(master_acc.numpy()[0], master_batch_size.numpy()[0]) + + master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) + master_acc_meter.update(master_acc.numpy()[0], master_batch_size.numpy()[0]) train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) train_acc_meter.update(acc.numpy()[0], batch_size.numpy()[0]) if batch_id % debug_steps == 0: - if local_logger: - local_logger.info( - f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {train_loss_meter.avg:.4f}, " + - f"Avg Acc: {train_acc_meter.avg:.4f}") - if master_logger and dist.get_rank() == 0: - master_logger.info( - f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {master_train_loss_meter.avg:.4f}, " + - f"Avg Acc: {master_train_acc_meter.avg:.4f}") + local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + + f"Step[{batch_id:04d}/{total_batch:04d}], " + + f"Avg Loss: {train_loss_meter.avg:.4f}, " + + f"Avg Acc: {train_acc_meter.avg:.4f}") + master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + + f"Step[{batch_id:04d}/{total_batch:04d}], " + + f"Avg Loss: {master_loss_meter.avg:.4f}, " + + f"Avg Acc: {master_acc_meter.avg:.4f}") + write_log(local_logger, master_logger, local_message, master_message) train_time = time.time() - time_st return (train_loss_meter.avg, train_acc_meter.avg, - master_train_loss_meter.avg, - master_train_acc_meter.avg, + master_loss_meter.avg, + master_acc_meter.avg, train_time) +@paddle.no_grad() def validate(dataloader, model, - criterion, + optimizer, total_batch, debug_steps=100, local_logger=None, master_logger=None): - """Validation for whole dataset + """Validation for the whole dataset Args: dataloader: paddle.io.DataLoader, dataloader instance model: nn.Layer, a ViT model - criterion: nn.criterion - total_epoch: int, total num of epoch, for logging + total_batch: int, total num of batches for one epoch debug_steps: int, num of iters to log info, default: 100 local_logger: logger for local process/gpu, default: None master_logger: logger for main process, default: None Returns: val_loss_meter.avg: float, average loss on current process/gpu - val_acc1_meter.avg: float, average top1 accuracy on current process/gpu - val_acc5_meter.avg: float, average top5 accuracy on current process/gpu - master_val_loss_meter.avg: float, average loss on all processes/gpus - master_val_acc1_meter.avg: float, average top1 accuracy on all processes/gpus - master_val_acc5_meter.avg: float, average top5 accuracy on all processes/gpus + val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus + val_acc5_meter.avg: float, average top5 accuracy on current processes/gpus + master_loss_meter.avg: float, average loss on all processes/gpus + 
master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus + master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus val_time: float, validation time """ model.eval() val_loss_meter = AverageMeter() val_acc1_meter = AverageMeter() val_acc5_meter = AverageMeter() - master_val_loss_meter = AverageMeter() - master_val_acc1_meter = AverageMeter() - master_val_acc5_meter = AverageMeter() + master_loss_meter = AverageMeter() + master_acc1_meter = AverageMeter() + master_acc5_meter = AverageMeter() + + if amp is True: + scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 time_st = time.time() - with paddle.no_grad(): - for batch_id, data in enumerate(dataloader): - image = data[0] - label = data[1] + for batch_id, data in enumerate(dataloader): + # get data + images = data[0] + label = data[1] + + output = model(image) + loss = criterion(output, label) - output = model(image) - loss = criterion(output, label) + pred = F.softmax(output) + acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)) + acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5) + + # sync from other gpus for overall loss and acc + batch_size = paddle.to_tensor(images.shape[0]) + master_loss = paddle.to_tensor(loss.numpy()) + master_acc1 = paddle.to_tensor(acc1.numpy()) + master_acc5 = paddle.to_tensor(acc5.numpy()) + master_batch_size = paddle.to_tensor(batch_size.numpy()) + dist.all_reduce(master_loss) + dist.all_reduce(master_batch_size) + dist.all_reduce(master_acc1) + dist.all_reduce(master_acc5) + master_loss = master_loss / dist.get_world_size() + master_acc1 = master_acc1 / dist.get_world_size() + master_acc5 = master_acc5 / dist.get_world_size() + master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) + master_acc1_meter.update(master_acc1.numpy()[0], master_batch_size.numpy()[0]) + master_acc5_meter.update(master_acc5.numpy()[0], master_batch_size.numpy()[0]) + val_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) + val_acc1_meter.update(acc1.numpy()[0], batch_size.numpy()[0]) + val_acc5_meter.update(acc5.numpy()[0], batch_size.numpy()[0]) + + if batch_id % debug_steps == 0: + local_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " + + f"Avg Loss: {val_loss_meter.avg:.4f}, " + + f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " + + f"Avg Acc@5: {val_acc5_meter.avg:.4f}") + master_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " + + f"Avg Loss: {master_loss_meter.avg:.4f}, " + + f"Avg Acc@1: {master_acc1_meter.avg:.4f}, " + + f"Avg Acc@5: {master_acc5_meter.avg:.4f}") + write_log(local_logger, master_logger, local_message, master_message) - pred = F.softmax(output) - acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)) - acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5) - - batch_size = paddle.to_tensor(image.shape[0]) - - master_loss = loss.clone() - master_acc1 = acc1.clone() - master_acc5 = acc5.clone() - master_batch_size = batch_size.clone() - - dist.all_reduce(master_loss) - dist.all_reduce(master_acc1) - dist.all_reduce(master_acc5) - dist.all_reduce(master_batch_size) - master_loss = master_loss / dist.get_world_size() - master_acc1 = master_acc1 / dist.get_world_size() - master_acc5 = master_acc5 / dist.get_world_size() - - master_val_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) - master_val_acc1_meter.update(master_acc1.numpy()[0], master_batch_size.numpy()[0]) - master_val_acc5_meter.update(master_acc5.numpy()[0], master_batch_size.numpy()[0]) - - 
val_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) - val_acc1_meter.update(acc1.numpy()[0], batch_size.numpy()[0]) - val_acc5_meter.update(acc5.numpy()[0], batch_size.numpy()[0]) - - if batch_id % debug_steps == 0: - if local_logger: - local_logger.info( - f"Val Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {val_loss_meter.avg:.4f}, " + - f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " + - f"Avg Acc@5: {val_acc5_meter.avg:.4f}") - if master_logger and dist.get_rank() == 0: - master_logger.info( - f"Val Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {master_val_loss_meter.avg:.4f}, " + - f"Avg Acc@1: {master_val_acc1_meter.avg:.4f}, " + - f"Avg Acc@5: {master_val_acc5_meter.avg:.4f}") val_time = time.time() - time_st return (val_loss_meter.avg, val_acc1_meter.avg, val_acc5_meter.avg, - master_val_loss_meter.avg, - master_val_acc1_meter.avg, - master_val_acc5_meter.avg, + master_loss_meter.avg, + master_acc1_meter.avg, + master_acc5_meter.avg, val_time) def main_worker(*args): # STEP 0: Preparation - config = args[0] dist.init_parallel_env() - last_epoch = config.TRAIN.LAST_EPOCH world_size = dist.get_world_size() local_rank = dist.get_rank() + config = args[0] + last_epoch = config.TRAIN.LAST_EPOCH seed = config.SEED + local_rank paddle.seed(seed) np.random.seed(seed) @@ -311,25 +333,27 @@ def main_worker(*args): master_logger.info(f'\n{config}') else: master_logger = None - local_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}') - if local_rank == 0: - master_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}') + + message = f'----- world_size = {world_size}, local_rank = {local_rank}' + write_log(local_logger, master_logger, message) # STEP 1: Create model model = build_model(config) model = paddle.DataParallel(model) # STEP 2: Create train and val dataloader - dataset_train, dataset_val = args[1], args[2] - dataloader_train = get_dataloader(config, dataset_train, 'train', True) - dataloader_val = get_dataloader(config, dataset_val, 'test', True) - total_batch_train = len(dataloader_train) + if not config.EVAL: + dataset_train = args[1] + dataloader_train = get_dataloader(config, dataset_train, 'train', True) + total_batch_train = len(dataloader_train) + message = f'----- Total # of train batch (single gpu): {total_batch_train}' + write_log(local_logger, master_logger, message) + + dataset_val = args[2] + dataloader_val = get_dataloader(config, dataset_val, 'val', True) total_batch_val = len(dataloader_val) - local_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}') - local_logger.info(f'----- Total # of val batch (single gpu): {total_batch_val}') - if local_rank == 0: - master_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}') - master_logger.info(f'----- Total # of val batch (single gpu): {total_batch_val}') + message = f'----- Total # of val batch (single gpu): {total_batch_val}' + write_log(local_logger, master_logger, message) # STEP 3: Define Mixup function mixup_fn = None @@ -352,7 +376,7 @@ def main_worker(*args): # only use cross entropy for val criterion_val = nn.CrossEntropyLoss() - # STEP 5: Define optimizer and lr_scheduler + # STEP 4: Define optimizer and lr_scheduler # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) if config.TRAIN.LINEAR_SCALED_LR is not None: linear_scaled_lr = ( @@ -371,129 +395,112 @@ def main_worker(*args): config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr 
config.TRAIN.END_LR = linear_scaled_end_lr - scheduler = None - if config.TRAIN.LR_SCHEDULER.NAME == "warmupcosine": - scheduler = WarmupCosineScheduler(learning_rate=config.TRAIN.BASE_LR, - warmup_start_lr=config.TRAIN.WARMUP_START_LR, - start_lr=config.TRAIN.BASE_LR, - end_lr=config.TRAIN.END_LR, - warmup_epochs=config.TRAIN.WARMUP_EPOCHS, - total_epochs=config.TRAIN.NUM_EPOCHS, - last_epoch=config.TRAIN.LAST_EPOCH, - ) - elif config.TRAIN.LR_SCHEDULER.NAME == "cosine": - scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=config.TRAIN.BASE_LR, - T_max=config.TRAIN.NUM_EPOCHS, - last_epoch=last_epoch) - elif config.scheduler == "multi-step": - milestones = [int(v.strip()) - for v in config.TRAIN.LR_SCHEDULER.MILESTONES.split(",")] - scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=config.TRAIN.BASE_LR, - milestones=milestones, - gamma=config.TRAIN.LR_SCHEDULER.DECAY_RATE, - last_epoch=last_epoch) + lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale + config.TRAIN.END_LR, + config.TRAIN.NUM_EPOCHS, + len(dataloader_train), + warmup_epochs=config.TRAIN.WARMUP_EPOCHS) + + #params_groups = get_params_groups(model) + params_groups = lr_decay.param_groups_lrd( + model=model._layers, # TODO: check correctness + weight_decay=config.TRAIN.WEIGHT_DECAY, + layer_decay=config.TRAIN.LAYER_DECAY) + + if config.TRAIN.GRAD_CLIP: + clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) else: - local_logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") - if local_rank == 0: - master_logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") - raise NotImplementedError(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") + clip = None if config.TRAIN.OPTIMIZER.NAME == "SGD": - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None optimizer = paddle.optimizer.Momentum( - parameters=model.parameters(), + parameters=params_groups, learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, grad_clip=clip) elif config.TRAIN.OPTIMIZER.NAME == "AdamW": - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None optimizer = paddle.optimizer.AdamW( - parameters=model.parameters(), - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, + parameters=params_groups, + learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], - weight_decay=config.TRAIN.WEIGHT_DECAY, + weight_decay=1.0, #config.TRAIN.WEIGHT_DECAY, epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip, - #apply_decay_param_fun=get_exclude_from_weight_decay_fn(['pos_embed', 'cls_token']), - ) + grad_clip=clip) else: - local_logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") - if local_rank == 0: - master_logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") - raise NotImplementedError(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") + message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." 
+ write_log(local_logger, master_logger, message, None, 'fatal') + raise NotImplementedError(message) - # STEP 6: Load pretrained model / load resumt model and optimizer states + # STEP 5: Load pretrained model / load resumt model and optimizer states if config.MODEL.PRETRAINED: - if (config.MODEL.PRETRAINED).endswith('.pdparams'): - raise ValueError( - f'{config.MODEL.PRETRAINED} should not contain .pdparams') assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True - model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams') + model_state = paddle.load(config.MODEL.PRETRAINED + '.pdparams') + + if not config.EVAL: + keys = ['encoder.norm.weight', 'encoder.norm.bias', + 'classfier.weight', 'classifier.bias'] + if config.MODEL.GLOBAL_POOL: + del model_state[keys[0]] + del model_state[keys[1]] + + # interpolate position embedding + interpolate_pos_embed(model, model_state) + + model.set_dict(model_state) - local_logger.info(f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}") - if local_rank == 0: - master_logger.info( - f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}") + message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" + write_log(local_logger, master_logger, message) if config.MODEL.RESUME: - assert os.path.isfile(config.MODEL.RESUME + '.pdparams') is True - assert os.path.isfile(config.MODEL.RESUME + '.pdopt') is True - model_state = paddle.load(config.MODEL.RESUME + '.pdparams') + assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True + assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True + model_state = paddle.load(config.MODEL.RESUME+'.pdparams') model.set_dict(model_state) opt_state = paddle.load(config.MODEL.RESUME+'.pdopt') optimizer.set_state_dict(opt_state) - local_logger.info( - f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}") - if local_rank == 0: - master_logger.info( - f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}") - - # STEP 7: Validation (eval mode) + message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}" + write_log(local_logger, master_logger, message) + + # STEP 6: Validation (eval mode) if config.EVAL: - local_logger.info('----- Start Validating') - if local_rank == 0: - master_logger.info('----- Start Validating') + write_log(local_logger, master_logger, f"----- Start Validation") val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( dataloader=dataloader_val, model=model, criterion=criterion_val, - total_batch=total_batch_val, + total_batch=total_batch_train, debug_steps=config.REPORT_FREQ, local_logger=local_logger, master_logger=master_logger) - local_logger.info(f"Validation Loss: {val_loss:.4f}, " + - f"Validation Acc@1: {val_acc1:.4f}, " + - f"Validation Acc@5: {val_acc5:.4f}, " + - f"time: {val_time:.2f}") - if local_rank == 0: - master_logger.info(f"Validation Loss: {avg_loss:.4f}, " + - f"Validation Acc@1: {avg_acc1:.4f}, " + - f"Validation Acc@5: {avg_acc5:.4f}, " + - f"time: {val_time:.2f}") - return - - # STEP 8: Start training and validation (train mode) - local_logger.info(f"Start training from epoch {last_epoch+1}.") - if local_rank == 0: - master_logger.info(f"Start training from epoch {last_epoch+1}.") - for epoch in range(last_epoch+1, config.TRAIN.NUM_EPOCHS+1): + + local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Validation Loss: {val_loss:.4f}, " + + f"Validation Acc@1: {val_acc1:.4f}, " + + 
f"Validation Acc@1: {val_acc5:.4f}, " + + f"time: {val_time:.2f}") + + local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Validation Loss: {avg_loss:.4f}, " + + f"Validation Acc@1: {avg_acc1:.4f}, " + + f"Validation Acc@1: {avg_acc5:.4f}, " + + f"time: {val_time:.2f}") + + + + # STEP 7: Start training (train mode) + write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.") + for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): # train - local_logger.info(f"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}") - if local_rank == 0: - master_logger.info(f"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}") + write_log(local_logger, master_logger, f"Train epoch {epoch}. LR={optimizer.get_lr():.6e}") + train_loss, train_acc, avg_loss, avg_acc, train_time = train( dataloader=dataloader_train, model=model, - criterion=criterion, optimizer=optimizer, + criterion=criterion, + lr_schedule=lr_schedule, epoch=epoch, total_epochs=config.TRAIN.NUM_EPOCHS, total_batch=total_batch_train, @@ -504,42 +511,42 @@ def main_worker(*args): local_logger=local_logger, master_logger=master_logger) - scheduler.step() + local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Train Loss: {train_loss:.4f}, " + + f"Train Acc: {train_acc:.4f}, " + + f"time: {train_time:.2f}") - local_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {train_loss:.4f}, " + - f"Train Acc: {train_acc:.4f}, " + + master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Train Loss: {avg_loss:.4f}, " + + f"Train Acc: {avg_acc:.4f}, " + f"time: {train_time:.2f}") - if local_rank == 0: - master_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {avg_loss:.4f}, " + - f"Train Acc: {avg_acc:.4f}, " + - f"time: {train_time:.2f}") + write_log(local_logger, master_logger, local_message, master_message) # validation - if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: - local_logger.info(f'----- Validation after Epoch: {epoch}') - if local_rank == 0: - master_logger.info(f'----- Validation after Epoch: {epoch}') + if epoch % config.VALIDATION_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: + write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}') val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( dataloader=dataloader_val, model=model, criterion=criterion_val, - total_batch=total_batch_val, + total_batch=total_batch_train, debug_steps=config.REPORT_FREQ, local_logger=local_logger, master_logger=master_logger) - local_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Validation Loss: {val_loss:.4f}, " + - f"Validation Acc@1: {val_acc1:.4f}, " + - f"Validation Acc@5: {val_acc5:.4f}, " + - f"time: {val_time:.2f}") - if local_rank == 0: - master_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Validation Loss: {avg_loss:.4f}, " + - f"Validation Acc@1: {avg_acc1:.4f}, " + - f"Validation Acc@5: {avg_acc5:.4f}, " + - f"time: {val_time:.2f}") + + local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Validation Loss: {val_loss:.4f}, " + + f"Validation Acc@1: {val_acc1:.4f}, " + + f"Validation Acc@1: {val_acc5:.4f}, " + + f"time: {val_time:.2f}") + + local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Validation Loss: {avg_loss:.4f}, " + + f"Validation 
Acc@1: {avg_acc1:.4f}, " + + f"Validation Acc@1: {avg_acc5:.4f}, " + + f"time: {val_time:.2f}") + write_log(local_logger, master_logger, local_message, master_message) + # model save if local_rank == 0: if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: @@ -547,11 +554,9 @@ def main_worker(*args): config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}") paddle.save(model.state_dict(), model_path + '.pdparams') paddle.save(optimizer.state_dict(), model_path + '.pdopt') - local_logger.info(f"----- Save model: {model_path}.pdparams") - local_logger.info(f"----- Save optim: {model_path}.pdopt") - if local_rank == 0: - master_logger.info(f"----- Save model: {model_path}.pdparams") - master_logger.info(f"----- Save optim: {model_path}.pdopt") + message = (f"----- Save model: {model_path}.pdparams \n" + + f"----- Save optim: {model_path}.pdopt") + write_log(local_logger, master_logger, message) def main(): @@ -559,21 +564,22 @@ def main(): arguments = get_arguments() config = get_config() config = update_config(config, arguments) - # set output folder if not config.EVAL: - config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) + config.SAVE = '{}/finetuning-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) else: config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - if not os.path.exists(config.SAVE): os.makedirs(config.SAVE, exist_ok=True) - - # get dataset and start DDP - dataset_train = get_dataset(config, mode='train') + # get dataset + if not config.EVAL: + dataset_train = get_dataset(config, mode='train') + else: + dataset_train = None dataset_val = get_dataset(config, mode='val') + # start training config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS - dist.spawn(main_worker, args=(config, dataset_train, dataset_val, ), nprocs=config.NGPUS) + dist.spawn(main_worker, args=(config, dataset_train, dataset_val), nprocs=config.NGPUS) if __name__ == "__main__": diff --git a/image_classification/MAE/main_multi_gpu_linearprobe.py b/image_classification/MAE/main_multi_gpu_linearprobe.py new file mode 100644 index 00000000..96fb8283 --- /dev/null +++ b/image_classification/MAE/main_multi_gpu_linearprobe.py @@ -0,0 +1,562 @@ +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""MAE linear probing using multiple GPU """ + +import sys +import os +import time +import logging +import argparse +import random +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import paddle.distributed as dist +from datasets import get_dataloader +from datasets import get_dataset +from losses import LabelSmoothingCrossEntropyLoss +from losses import SoftTargetCrossEntropyLoss +from transformer import build_transformer as build_model +from utils import AverageMeter +from utils import WarmupCosineScheduler +from utils import get_exclude_from_weight_decay_fn +from utils import get_params_groups +from utils import cosine_scheduler +from utils import interpolate_pos_embed +from config import get_config +from config import update_config + + +def get_arguments(): + """return argumeents, this will overwrite the config after loading yaml file""" + parser = argparse.ArgumentParser('MAE') + parser.add_argument('-cfg', type=str, default=None) + parser.add_argument('-dataset', type=str, default=None) + parser.add_argument('-batch_size', type=int, default=None) + parser.add_argument('-image_size', type=int, default=None) + parser.add_argument('-data_path', type=str, default=None) + parser.add_argument('-output', type=str, default=None) + parser.add_argument('-ngpus', type=int, default=None) + parser.add_argument('-pretrained', type=str, default=None) + parser.add_argument('-resume', type=str, default=None) + parser.add_argument('-last_epoch', type=int, default=None) + parser.add_argument('-eval', action='store_true') + parser.add_argument('-amp', action='store_true') + arguments = parser.parse_args() + return arguments + + +def get_logger(filename, logger_name=None): + """set logging file and format + Args: + filename: str, full path of the logger file to write + logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' + Return: + logger: python logger + """ + log_format = "%(asctime)s %(message)s" + logging.basicConfig(stream=sys.stdout, level=logging.INFO, + format=log_format, datefmt="%m%d %I:%M:%S %p") + # different name is needed when creating multiple logger in one process + logger = logging.getLogger(logger_name) + fh = logging.FileHandler(os.path.join(filename)) + fh.setFormatter(logging.Formatter(log_format)) + logger.addHandler(fh) + return logger + + +def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): + if local_logger: + if level == 'info': + local_logger.info(msg_local) + elif level == 'fatal': + local_logger.fatal(msg_local) + else: + raise ValueError("level must in ['info', 'fatal']") + if master_logger and dist.get_rank() == 0: + if msg_master is None: + msg_master = msg_local + if level == 'info': + master_logger.info("MASTER_LOG " + msg_master) + elif level == 'fatal': + master_logger.fatal("MASTER_LOG " + msg_master) + else: + raise ValueError("level must in ['info', 'fatal']") + + +def train(dataloader, + model, + optimizer, + criterion, + lr_schedule, + epoch, + total_epochs, + total_batch, + debug_steps=100, + accum_iter=1, + amp=False, + local_logger=None, + master_logger=None): + """Training for one epoch + Args: + dataloader: paddle.io.DataLoader, dataloader instance + model: nn.Layer, a ViT model + optimizer: nn.optimizer + criterion: nn.XXLoss + lr_schedule: list of float, lr schdeule + epoch: int, current epoch + total_epochs: int, total num of epochs + total_batch: int, total num of batches for one epoch + debug_steps: int, num of iters to log info, default: 100 + 
accum_iter: int, num of iters for accumulating gradients, default: 1
+        amp: bool, if True, use mix precision training, default: False
+        local_logger: logger for local process/gpu, default: None
+        master_logger: logger for main process, default: None
+    Returns:
+        train_loss_meter.avg: float, average loss on current process/gpu
+        train_acc_meter.avg: float, average acc@1 on current process/gpu
+        master_loss_meter.avg: float, average loss on all processes/gpus
+        master_acc_meter.avg: float, average acc@1 on all processes/gpus
+        train_time: float, training time
+    """
+    model.train()
+    train_loss_meter = AverageMeter()
+    train_acc_meter = AverageMeter()
+    master_loss_meter = AverageMeter()
+    master_acc_meter = AverageMeter()
+
+    if amp is True:
+        scaler = paddle.amp.GradScaler()  # default init_loss_scaling = 32768
+    time_st = time.time()
+
+    for batch_id, data in enumerate(dataloader):
+        # get data
+        images = data[0]
+        label = data[1]
+
+        # set per iteration lr using scheduler
+        global_train_iter = total_batch * (epoch - 1) + batch_id  # epoch starts from 1
+        optimizer.set_lr(lr_schedule[global_train_iter])
+        # forward
+        with paddle.amp.auto_cast(amp is True):
+            output = model(images)
+            loss = criterion(output, label)
+
+        if not amp:  # fp32
+            loss.backward()
+            if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
+                optimizer.step()
+                optimizer.clear_grad()
+        else:
+            scaled = scaler.scale(loss)
+            scaled.backward()
+            # TODO: check if manually unscale and clip grad is required here
+            if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
+                # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188
+                scaler.step(optimizer)
+                scaler.update()
+                optimizer.clear_grad()
+
+        pred = F.softmax(output)
+        acc = paddle.metric.accuracy(pred, label.unsqueeze(1))
+
+        # sync from other gpus for overall loss and acc
+        batch_size = paddle.to_tensor(images.shape[0])
+        master_loss = paddle.to_tensor(loss.numpy())
+        master_acc = paddle.to_tensor(acc.numpy())
+        master_batch_size = paddle.to_tensor(batch_size.numpy())
+        dist.all_reduce(master_loss)
+        dist.all_reduce(master_acc)
+        dist.all_reduce(master_batch_size)
+        master_loss = master_loss / dist.get_world_size()
+        master_acc = master_acc / dist.get_world_size()
+        master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0])
+        master_acc_meter.update(master_acc.numpy()[0], master_batch_size.numpy()[0])
+
+        train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0])
+        train_acc_meter.update(acc.numpy()[0], batch_size.numpy()[0])
+
+        if batch_id % debug_steps == 0:
+            local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " +
+                             f"Step[{batch_id:04d}/{total_batch:04d}], " +
+                             f"Avg Loss: {train_loss_meter.avg:.4f}, " +
+                             f"Avg Acc: {train_acc_meter.avg:.4f}")
+            master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " +
+                              f"Step[{batch_id:04d}/{total_batch:04d}], " +
+                              f"Avg Loss: {master_loss_meter.avg:.4f}, " +
+                              f"Avg Acc: {master_acc_meter.avg:.4f}")
+            write_log(local_logger, master_logger, local_message, master_message)
+
+    train_time = time.time() - time_st
+    return (train_loss_meter.avg,
+            train_acc_meter.avg,
+            master_loss_meter.avg,
+            master_acc_meter.avg,
+            train_time)
+
+
+@paddle.no_grad()
+def validate(dataloader,
+             model,
+             criterion,
+             total_batch,
+             debug_steps=100,
+             local_logger=None,
+             master_logger=None):
+    """Validation for the whole dataset
+    Args:
+        dataloader: paddle.io.DataLoader, dataloader instance
+        model: nn.Layer, a ViT model
+        criterion: nn.XXLoss, loss used for validation (cross entropy)
+        total_batch: int,
total num of batches for one epoch
+        debug_steps: int, num of iters to log info, default: 100
+        local_logger: logger for local process/gpu, default: None
+        master_logger: logger for main process, default: None
+    Returns:
+        val_loss_meter.avg: float, average loss on current process/gpu
+        val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus
+        val_acc5_meter.avg: float, average top5 accuracy on current processes/gpus
+        master_loss_meter.avg: float, average loss on all processes/gpus
+        master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus
+        master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus
+        val_time: float, validation time
+    """
+    model.eval()
+    val_loss_meter = AverageMeter()
+    val_acc1_meter = AverageMeter()
+    val_acc5_meter = AverageMeter()
+    master_loss_meter = AverageMeter()
+    master_acc1_meter = AverageMeter()
+    master_acc5_meter = AverageMeter()
+
+    time_st = time.time()
+
+    for batch_id, data in enumerate(dataloader):
+        # get data
+        images = data[0]
+        label = data[1]
+
+        output = model(images)
+        loss = criterion(output, label)
+
+        pred = F.softmax(output)
+        acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1))
+        acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5)
+
+        # sync from other gpus for overall loss and acc
+        batch_size = paddle.to_tensor(images.shape[0])
+        master_loss = paddle.to_tensor(loss.numpy())
+        master_acc1 = paddle.to_tensor(acc1.numpy())
+        master_acc5 = paddle.to_tensor(acc5.numpy())
+        master_batch_size = paddle.to_tensor(batch_size.numpy())
+        dist.all_reduce(master_loss)
+        dist.all_reduce(master_batch_size)
+        dist.all_reduce(master_acc1)
+        dist.all_reduce(master_acc5)
+        master_loss = master_loss / dist.get_world_size()
+        master_acc1 = master_acc1 / dist.get_world_size()
+        master_acc5 = master_acc5 / dist.get_world_size()
+        master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0])
+        master_acc1_meter.update(master_acc1.numpy()[0], master_batch_size.numpy()[0])
+        master_acc5_meter.update(master_acc5.numpy()[0], master_batch_size.numpy()[0])
+        val_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0])
+        val_acc1_meter.update(acc1.numpy()[0], batch_size.numpy()[0])
+        val_acc5_meter.update(acc5.numpy()[0], batch_size.numpy()[0])
+
+        if batch_id % debug_steps == 0:
+            local_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " +
+                             f"Avg Loss: {val_loss_meter.avg:.4f}, " +
+                             f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " +
+                             f"Avg Acc@5: {val_acc5_meter.avg:.4f}")
+            master_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " +
+                              f"Avg Loss: {master_loss_meter.avg:.4f}, " +
+                              f"Avg Acc@1: {master_acc1_meter.avg:.4f}, " +
+                              f"Avg Acc@5: {master_acc5_meter.avg:.4f}")
+            write_log(local_logger, master_logger, local_message, master_message)
+
+    val_time = time.time() - time_st
+    return (val_loss_meter.avg,
+            val_acc1_meter.avg,
+            val_acc5_meter.avg,
+            master_loss_meter.avg,
+            master_acc1_meter.avg,
+            master_acc5_meter.avg,
+            val_time)
+
+
+def main_worker(*args):
+    # STEP 0: Preparation
+    dist.init_parallel_env()
+    world_size = dist.get_world_size()
+    local_rank = dist.get_rank()
+    config = args[0]
+    last_epoch = config.TRAIN.LAST_EPOCH
+    seed = config.SEED + local_rank
+    paddle.seed(seed)
+    np.random.seed(seed)
+    random.seed(seed)
+    # logger for each process/gpu
+    local_logger = get_logger(
+        filename=os.path.join(config.SAVE, 'log_{}.txt'.format(local_rank)),
+        logger_name='local_logger')
+    #
overall logger + if local_rank == 0: + master_logger = get_logger( + filename=os.path.join(config.SAVE, 'log.txt'), + logger_name='master_logger') + master_logger.info(f'\n{config}') + else: + master_logger = None + + message = f'----- world_size = {world_size}, local_rank = {local_rank}' + write_log(local_logger, master_logger, message) + + # STEP 1: Create model + model = build_model(config) + model = paddle.DataParallel(model) + + # STEP 2: Create train and val dataloader + if not config.EVAL: + dataset_train = args[1] + dataloader_train = get_dataloader(config, dataset_train, 'train', True) + total_batch_train = len(dataloader_train) + message = f'----- Total # of train batch (single gpu): {total_batch_train}' + write_log(local_logger, master_logger, message) + + dataset_val = args[2] + dataloader_val = get_dataloader(config, dataset_val, 'val', True) + total_batch_val = len(dataloader_val) + message = f'----- Total # of val batch (single gpu): {total_batch_val}' + write_log(local_logger, master_logger, message) + + # STEP 4: Define criterion + criterion = nn.CrossEntropyLoss() + # only use cross entropy for val + criterion_val = nn.CrossEntropyLoss() + + # STEP 4: Define optimizer and lr_scheduler + # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) + if config.TRAIN.LINEAR_SCALED_LR is not None: + linear_scaled_lr = ( + config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + linear_scaled_warmup_start_lr = ( + config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + linear_scaled_end_lr = ( + config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + + if config.TRAIN.ACCUM_ITER > 1: + linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER + linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER + linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER + + config.TRAIN.BASE_LR = linear_scaled_lr + config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr + config.TRAIN.END_LR = linear_scaled_end_lr + + lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale + config.TRAIN.END_LR, + config.TRAIN.NUM_EPOCHS, + len(dataloader_train), + warmup_epochs=config.TRAIN.WARMUP_EPOCHS) + + params_groups = get_params_groups(model) + + if config.TRAIN.GRAD_CLIP: + clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) + else: + clip = None + + if config.TRAIN.OPTIMIZER.NAME == "SGD": + optimizer = paddle.optimizer.Momentum( + parameters=params_groups, + learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, + weight_decay=config.TRAIN.WEIGHT_DECAY, + momentum=config.TRAIN.OPTIMIZER.MOMENTUM, + grad_clip=clip) + elif config.TRAIN.OPTIMIZER.NAME == "AdamW": + optimizer = paddle.optimizer.AdamW( + parameters=params_groups, + learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + beta1=config.TRAIN.OPTIMIZER.BETAS[0], + beta2=config.TRAIN.OPTIMIZER.BETAS[1], + weight_decay=config.TRAIN.WEIGHT_DECAY, + epsilon=config.TRAIN.OPTIMIZER.EPS, + grad_clip=clip) + else: + message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." 
+        write_log(local_logger, master_logger, message, None, 'fatal')
+        raise NotImplementedError(message)
+
+    # STEP 5: Load pretrained model / load resume model and optimizer states
+    if config.MODEL.PRETRAINED:
+        assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True
+        model_state = paddle.load(config.MODEL.PRETRAINED + '.pdparams')
+
+        if not config.EVAL:
+            keys = ['encoder.norm.weight', 'encoder.norm.bias',
+                    'classifier.weight', 'classifier.bias']
+            if config.MODEL.GLOBAL_POOL:
+                del model_state[keys[0]]
+                del model_state[keys[1]]
+
+            # interpolate position embedding
+            interpolate_pos_embed(model, model_state)
+
+        model.set_dict(model_state)
+        message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}"
+        write_log(local_logger, master_logger, message)
+
+    # for linearprobing
+    model._layers.classifier = nn.Sequential(
+        nn.BatchNorm1D(model._layers.classifier.weight.shape[0], weight_attr=False, epsilon=1e-6),
+        model._layers.classifier)
+    # freeze all but the classifier
+    for _, p in model.named_parameters():
+        p.stop_gradient = True
+    for _, p in model._layers.classifier.named_parameters():
+        p.stop_gradient = False
+
+    if config.MODEL.RESUME:
+        assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True
+        assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True
+        model_state = paddle.load(config.MODEL.RESUME+'.pdparams')
+        model.set_dict(model_state)
+        opt_state = paddle.load(config.MODEL.RESUME+'.pdopt')
+        optimizer.set_state_dict(opt_state)
+        message = f"----- Resume Training: Load model and optimizer from {config.MODEL.RESUME}"
+        write_log(local_logger, master_logger, message)
+
+    # STEP 6: Validation (eval mode)
+    if config.EVAL:
+        write_log(local_logger, master_logger, f"----- Start Validation")
+        val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(
+            dataloader=dataloader_val,
+            model=model,
+            criterion=criterion_val,
+            total_batch=total_batch_val,
+            debug_steps=config.REPORT_FREQ,
+            local_logger=local_logger,
+            master_logger=master_logger)
+
+        local_message = (f"----- Validation Loss: {val_loss:.4f}, " +
+                         f"Validation Acc@1: {val_acc1:.4f}, " +
+                         f"Validation Acc@5: {val_acc5:.4f}, " +
+                         f"time: {val_time:.2f}")
+
+        master_message = (f"----- Validation Loss: {avg_loss:.4f}, " +
+                          f"Validation Acc@1: {avg_acc1:.4f}, " +
+                          f"Validation Acc@5: {avg_acc5:.4f}, " +
+                          f"time: {val_time:.2f}")
+        write_log(local_logger, master_logger, local_message, master_message)
+        return
+
+    # STEP 7: Start training (train mode)
+    write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.")
+    for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1):
+        # train
+        write_log(local_logger, master_logger, f"Train epoch {epoch}. LR={optimizer.get_lr():.6e}")
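+        # Note: the learning rate is not driven by a paddle LRScheduler here;
+        # train() looks up lr_schedule[global_iter] (the warmup + cosine values
+        # precomputed by cosine_scheduler above) and applies it with
+        # optimizer.set_lr() before every step, so optimizer.get_lr() reports
+        # whatever value was set last.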
+
+        train_loss, train_acc, avg_loss, avg_acc, train_time = train(
+            dataloader=dataloader_train,
+            model=model,
+            optimizer=optimizer,
+            criterion=criterion,
+            lr_schedule=lr_schedule,
+            epoch=epoch,
+            total_epochs=config.TRAIN.NUM_EPOCHS,
+            total_batch=total_batch_train,
+            debug_steps=config.REPORT_FREQ,
+            accum_iter=config.TRAIN.ACCUM_ITER,
+            amp=config.AMP,
+            local_logger=local_logger,
+            master_logger=master_logger)
+
+        local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
+                         f"Train Loss: {train_loss:.4f}, " +
+                         f"Train Acc: {train_acc:.4f}, " +
+                         f"time: {train_time:.2f}")
+
+        master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
+                          f"Train Loss: {avg_loss:.4f}, " +
+                          f"Train Acc: {avg_acc:.4f}, " +
+                          f"time: {train_time:.2f}")
+        write_log(local_logger, master_logger, local_message, master_message)
+
+        # validation
+        if epoch % config.VALIDATION_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
+            write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}')
+            val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(
+                dataloader=dataloader_val,
+                model=model,
+                criterion=criterion_val,
+                total_batch=total_batch_val,
+                debug_steps=config.REPORT_FREQ,
+                local_logger=local_logger,
+                master_logger=master_logger)
+
+            local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
+                             f"Validation Loss: {val_loss:.4f}, " +
+                             f"Validation Acc@1: {val_acc1:.4f}, " +
+                             f"Validation Acc@5: {val_acc5:.4f}, " +
+                             f"time: {val_time:.2f}")
+
+            master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
+                              f"Validation Loss: {avg_loss:.4f}, " +
+                              f"Validation Acc@1: {avg_acc1:.4f}, " +
+                              f"Validation Acc@5: {avg_acc5:.4f}, " +
+                              f"time: {val_time:.2f}")
+            write_log(local_logger, master_logger, local_message, master_message)
+
+        # model save
+        if local_rank == 0:
+            if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
+                model_path = os.path.join(
+                    config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}")
+                paddle.save(model.state_dict(), model_path + '.pdparams')
+                paddle.save(optimizer.state_dict(), model_path + '.pdopt')
+                message = (f"----- Save model: {model_path}.pdparams \n" +
+                           f"----- Save optim: {model_path}.pdopt")
+                write_log(local_logger, master_logger, message)
+
+
+def main():
+    # config is updated by: (1) config.py, (2) yaml file, (3) arguments
+    arguments = get_arguments()
+    config = get_config()
+    config = update_config(config, arguments)
+    # set output folder
+    if not config.EVAL:
+        config.SAVE = '{}/linearprobe-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))
+    else:
+        config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))
+    if not os.path.exists(config.SAVE):
+        os.makedirs(config.SAVE, exist_ok=True)
+    # get dataset
+    if not config.EVAL:
+        dataset_train = get_dataset(config, mode='train')
+    else:
+        dataset_train = None
+    dataset_val = get_dataset(config, mode='val')
+    # start training
+    config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS
+    dist.spawn(main_worker, args=(config, dataset_train, dataset_val), nprocs=config.NGPUS)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/image_classification/MAE/main_multi_gpu_pretrain.py b/image_classification/MAE/main_multi_gpu_pretrain.py
index d1789ddf..e70af1d0 100644
--- a/image_classification/MAE/main_multi_gpu_pretrain.py
+++ b/image_classification/MAE/main_multi_gpu_pretrain.py
@@
-12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""MEA pre-training using multiple GPU """ +"""MAE pre-training using multiple GPU """ import sys import os @@ -31,6 +31,8 @@ from utils import AverageMeter from utils import WarmupCosineScheduler from utils import get_exclude_from_weight_decay_fn +from utils import get_params_groups +from utils import cosine_scheduler from config import get_config from config import update_config @@ -49,7 +51,6 @@ def get_arguments(): parser.add_argument('-resume', type=str, default=None) parser.add_argument('-last_epoch', type=int, default=None) parser.add_argument('-eval', action='store_true') - parser.add_argument('-mae_pretrain', action='store_true') parser.add_argument('-amp', action='store_true') arguments = parser.parse_args() return arguments @@ -74,15 +75,33 @@ def get_logger(filename, logger_name=None): return logger +def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): + if local_logger: + if level == 'info': + local_logger.info(msg_local) + elif level == 'fatal': + local_logger.fatal(msg_local) + else: + raise ValueError("level must in ['info', 'fatal']") + if master_logger and dist.get_rank() == 0: + if msg_master is None: + msg_master = msg_local + if level == 'info': + master_logger.info("MASTER_LOG " + msg_master) + elif level == 'fatal': + master_logger.fatal("MASTER_LOG " + msg_master) + else: + raise ValueError("level must in ['info', 'fatal']") + + def train(dataloader, - patch_size, model, - criterion, + mask_ratio, optimizer, + lr_schedule, epoch, total_epochs, total_batch, - normalize_target=True, debug_steps=100, accum_iter=1, amp=False, @@ -91,119 +110,88 @@ def train(dataloader, """Training for one epoch Args: dataloader: paddle.io.DataLoader, dataloader instance - patch_size: int/tuple, image patch size model: nn.Layer, a ViT model - criterion: nn.criterion + mask_ratio: float, percentage of masking patches + optimizer: nn.optimizer + lr_schedule: list of float, lr schdeule epoch: int, current epoch total_epochs: int, total num of epochs - normalize_target: bool, if True, tokens are normalized by itself, default: True total_batch: int, total num of batches for one epoch debug_steps: int, num of iters to log info, default: 100 accum_iter: int, num of iters for accumulating gradients, default: 1 - mixup_fn: Mixup, mixup instance, default: None amp: bool, if True, use mix precision training, default: False local_logger: logger for local process/gpu, default: None master_logger: logger for main process, default: None Returns: train_loss_meter.avg: float, average loss on current process/gpu - train_acc_meter.avg: float, average top1 accuracy on current process/gpu - master_train_loss_meter.avg: float, average loss on all processes/gpus - master_train_acc_meter.avg: float, average top1 accuracy on all processes/gpus + master_loss_meter.avg: float, average loss on all processes/gpus train_time: float, training time """ model.train() train_loss_meter = AverageMeter() - master_train_loss_meter = AverageMeter() + master_loss_meter = AverageMeter() if amp is True: - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) + scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 time_st = time.time() for batch_id, data in enumerate(dataloader): + # get data images = data[0] - masks = paddle.to_tensor(data[1], dtype='bool') - - with paddle.no_grad(): - mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape([1, 3, 1, 1]) - std = 
paddle.to_tensor([0.229, 0.224, 0.225]).reshape([1, 3, 1, 1]) - unnorm_images = images * std + mean - B, C, H, W = images.shape - if normalize_target: - images_patch = unnorm_images.reshape([B, C, H//patch_size, patch_size, W//patch_size, patch_size]) - images_patch = images_patch.transpose([0, 2, 4, 3, 5, 1]) - images_patch = unnorm_images.reshape([B, -1, patch_size * patch_size, C]) - images_patch = (images_patch - images_patch.mean(axis=-2, keepdim=True)) / ( - images_patch.var(axis=-2, keepdim=True).sqrt() + 1e-6) - images_patch = images_patch.flatten(-2) - else: - images_patch = unnorm_images.reshape([B, C, H//patch_size, patch_size, W//patch_size, patch_size]) - images_patch = images_patch.transpose([0, 2, 4, 3, 5, 1]) - images_patch = unnorm_images.reshape([B, -1, patch_size * patch_size, C]) - images_patch = images_patch.flatten(-2) - - B, _, C = images_patch.shape - labels = images_patch[masks[:, 1:]].reshape([B, -1, C]) - - if amp is True: - with paddle.amp.auto_cast(): - reconstructed_patches = model(images, masks) - loss = criterion(reconstructed_patches, labels) - scaled = scaler.scale(loss) - scaled.backward() - + # set per iteration lr using scheduler + global_train_iter = total_batch * (epoch - 1) + batch_id # epoch starts from 1 + optimizer.set_lr(lr_schedule[global_train_iter]) + # forward + with paddle.amp.auto_cast(amp is True): + loss, _, _ = model(images, mask_ratio) + + if not amp: # fp32 + loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - scaler.minimize(optimizer, scaled) + optimizer.step() optimizer.clear_grad() else: - reconstructed_patches = model(images, masks) - loss = criterion(reconstructed_patches, labels) - # NOTE: division may be needed depending on the loss function - # Here no division is needed: - # default 'reduction' param in nn.CrossEntropyLoss is set to 'mean' - # loss = loss / accum_iter - loss.backward() - + scaled = scaler.scale(loss) + scaled.backward() + # TODO: check if manually unscale and clip grad is required here if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - optimizer.step() + # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 + scaler.step(optimizer) + scaler.update() optimizer.clear_grad() - batch_size = paddle.to_tensor(images.shape[0]) - # sync from other gpus for overall loss and acc - master_loss = loss.clone() - master_batch_size = batch_size.clone() + batch_size = paddle.to_tensor(images.shape[0]) + master_loss = paddle.to_tensor(loss.numpy()) + master_batch_size = paddle.to_tensor(batch_size.numpy()) dist.all_reduce(master_loss) dist.all_reduce(master_batch_size) master_loss = master_loss / dist.get_world_size() - master_train_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) - + master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) if batch_id % debug_steps == 0: - if local_logger: - local_logger.info( - f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {train_loss_meter.avg:.4f}") - if master_logger and dist.get_rank() == 0: - master_logger.info( - f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {master_train_loss_meter.avg:.4f}") + local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + + f"Step[{batch_id:04d}/{total_batch:04d}], " + + f"LR: {optimizer.get_lr():.6e}, " + + f"Avg Loss: 
{train_loss_meter.avg:.4f}") + master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + + f"Step[{batch_id:04d}/{total_batch:04d}], " + + f"LR: {optimizer.get_lr():.6e}, " + + f"Avg Loss: {master_loss_meter.avg:.4f}") + write_log(local_logger, master_logger, local_message, master_message) train_time = time.time() - time_st - return (train_loss_meter.avg, - master_train_loss_meter.avg, - train_time) + return train_loss_meter.avg, master_loss_meter.avg, train_time def main_worker(*args): # STEP 0: Preparation - config = args[0] dist.init_parallel_env() - last_epoch = config.TRAIN.LAST_EPOCH world_size = dist.get_world_size() local_rank = dist.get_rank() + config = args[0] + last_epoch = config.TRAIN.LAST_EPOCH seed = config.SEED + local_rank paddle.seed(seed) np.random.seed(seed) @@ -220,9 +208,9 @@ def main_worker(*args): master_logger.info(f'\n{config}') else: master_logger = None - local_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}') - if local_rank == 0: - master_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}') + + message = f'----- world_size = {world_size}, local_rank = {local_rank}' + write_log(local_logger, master_logger, message) # STEP 1: Create model model = build_model(config) @@ -232,12 +220,11 @@ def main_worker(*args): dataset_train = args[1] dataloader_train = get_dataloader(config, dataset_train, 'train', True) total_batch_train = len(dataloader_train) - local_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}') - if local_rank == 0: - master_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}') + message = f'----- Total # of train batch (single gpu): {total_batch_train}' + write_log(local_logger, master_logger, message) - # STEP 3: Define criterion - criterion = nn.MSELoss() + # STEP 3: Define criterion: loss is defined in model + #criterion = nn.MSELoss() # STEP 4: Define optimizer and lr_scheduler # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) @@ -258,107 +245,73 @@ def main_worker(*args): config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr config.TRAIN.END_LR = linear_scaled_end_lr - scheduler = None - if config.TRAIN.LR_SCHEDULER.NAME == "warmupcosine": - scheduler = WarmupCosineScheduler(learning_rate=config.TRAIN.BASE_LR, - warmup_start_lr=config.TRAIN.WARMUP_START_LR, - start_lr=config.TRAIN.BASE_LR, - end_lr=config.TRAIN.END_LR, - warmup_epochs=config.TRAIN.WARMUP_EPOCHS, - total_epochs=config.TRAIN.NUM_EPOCHS, - last_epoch=config.TRAIN.LAST_EPOCH, - ) - elif config.TRAIN.LR_SCHEDULER.NAME == "cosine": - scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=config.TRAIN.BASE_LR, - T_max=config.TRAIN.NUM_EPOCHS, - last_epoch=last_epoch) - elif config.scheduler == "multi-step": - milestones = [int(v.strip()) - for v in config.TRAIN.LR_SCHEDULER.MILESTONES.split(",")] - scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=config.TRAIN.BASE_LR, - milestones=milestones, - gamma=config.TRAIN.LR_SCHEDULER.DECAY_RATE, - last_epoch=last_epoch) + lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale + config.TRAIN.END_LR, + config.TRAIN.NUM_EPOCHS, + len(dataloader_train), + warmup_epochs=config.TRAIN.WARMUP_EPOCHS) + + params_groups = get_params_groups(model) + + if config.TRAIN.GRAD_CLIP: + clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) else: - local_logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") - if local_rank == 0: - 
master_logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") - raise NotImplementedError(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") + clip = None if config.TRAIN.OPTIMIZER.NAME == "SGD": - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None optimizer = paddle.optimizer.Momentum( - parameters=model.parameters(), + parameters=params_groups, learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, grad_clip=clip) elif config.TRAIN.OPTIMIZER.NAME == "AdamW": - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None optimizer = paddle.optimizer.AdamW( - parameters=model.parameters(), - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, + parameters=params_groups, + learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], weight_decay=config.TRAIN.WEIGHT_DECAY, epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip, - #apply_decay_param_fun=get_exclude_from_weight_decay_fn(['pos_embed', 'cls_token']), - ) + grad_clip=clip) else: - local_logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") - if local_rank == 0: - master_logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") - raise NotImplementedError(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") + message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." + write_log(local_logger, master_logger, message, None, 'fatal') + raise NotImplementedError(message) # STEP 5: Load pretrained model / load resumt model and optimizer states if config.MODEL.PRETRAINED: - if (config.MODEL.PRETRAINED).endswith('.pdparams'): - raise ValueError( - f'{config.MODEL.PRETRAINED} should not contain .pdparams') assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams') model.set_dict(model_state) - local_logger.info(f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}") - if local_rank == 0: - master_logger.info( - f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}") + message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" + write_log(local_logger, master_logger, message) if config.MODEL.RESUME: - assert os.path.isfile(config.MODEL.RESUME + '.pdparams') is True - assert os.path.isfile(config.MODEL.RESUME + '.pdopt') is True - model_state = paddle.load(config.MODEL.RESUME + '.pdparams') + assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True + assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True + model_state = paddle.load(config.MODEL.RESUME+'.pdparams') model.set_dict(model_state) - opt_state = paddle.load(config.MODEL.RESUME + '.pdopt') + opt_state = paddle.load(config.MODEL.RESUME+'.pdopt') optimizer.set_state_dict(opt_state) - local_logger.info( - f"----- Resume: Load model and optmizer from {config.MODEL.RESUME}") - if local_rank == 0: - master_logger.info( - f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}") + message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}" + write_log(local_logger, master_logger, message) + if config.TRAIN.LAST_EPOCH == -1: + message = f"----- Resume Training: LAST_EPOCH should not be [-1]" + write_log(local_logger, 
master_logger, message, None, 'fatal') # STEP 6: Start training (train mode) - local_logger.info(f"Start training from epoch {last_epoch+1}.") - if local_rank == 0: - master_logger.info(f"Start training from epoch {last_epoch+1}.") - for epoch in range(last_epoch+1, config.TRAIN.NUM_EPOCHS+1): + write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.") + for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): # train - local_logger.info(f"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}") - if local_rank == 0: - master_logger.info(f"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}") + write_log(local_logger, master_logger, f"Train epoch {epoch}. LR={optimizer.get_lr():.6e}") - train_loss,avg_loss, train_time = train( + train_loss, avg_loss, train_time = train( dataloader=dataloader_train, - patch_size=config.MODEL.TRANS.PATCH_SIZE, model=model, - criterion=criterion, + mask_ratio=config.MODEL.TRANS.MASK_RATIO, optimizer=optimizer, + lr_schedule=lr_schedule, epoch=epoch, total_epochs=config.TRAIN.NUM_EPOCHS, total_batch=total_batch_train, @@ -368,15 +321,14 @@ def main_worker(*args): local_logger=local_logger, master_logger=master_logger) - scheduler.step() + local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Train Loss: {train_loss:.4f}, " + + f"time: {train_time:.2f}") - local_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {train_loss:.4f}, " + + master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Train Loss: {avg_loss:.4f}, " + f"time: {train_time:.2f}") - if local_rank == 0: - master_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {avg_loss:.4f}, " + - f"time: {train_time:.2f}") + write_log(local_logger, master_logger, local_message, master_message) # model save if local_rank == 0: @@ -385,11 +337,9 @@ def main_worker(*args): config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}") paddle.save(model.state_dict(), model_path + '.pdparams') paddle.save(optimizer.state_dict(), model_path + '.pdopt') - local_logger.info(f"----- Save model: {model_path}.pdparams") - local_logger.info(f"----- Save optim: {model_path}.pdopt") - if local_rank == 0: - master_logger.info(f"----- Save model: {model_path}.pdparams") - master_logger.info(f"----- Save optim: {model_path}.pdopt") + message = (f"----- Save model: {model_path}.pdparams \n" + + f"----- Save optim: {model_path}.pdopt") + write_log(local_logger, master_logger, message) def main(): @@ -397,18 +347,13 @@ def main(): arguments = get_arguments() config = get_config() config = update_config(config, arguments) - # set output folder - if not config.EVAL: - config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - else: - config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - + config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) if not os.path.exists(config.SAVE): os.makedirs(config.SAVE, exist_ok=True) - - # get dataset and start DDP + # get dataset dataset_train = get_dataset(config, mode='train') + # start training config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS dist.spawn(main_worker, args=(config, dataset_train, ), nprocs=config.NGPUS) diff --git a/image_classification/MAE/main_single_gpu_finetune.py b/image_classification/MAE/main_single_gpu_finetune.py deleted file mode 100644 index ea267943..00000000 --- 
a/image_classification/MAE/main_single_gpu_finetune.py +++ /dev/null @@ -1,403 +0,0 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""ViT finetuning/validation using single GPU """ - -import sys -import os -import time -import logging -import argparse -import random -import numpy as np -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -from datasets import get_dataloader -from datasets import get_dataset -from transformer import build_mae_finetune as build_model -from utils import AverageMeter -from utils import WarmupCosineScheduler -from config import get_config -from config import update_config -from mixup import Mixup -from losses import LabelSmoothingCrossEntropyLoss -from losses import SoftTargetCrossEntropyLoss - - -def get_arguments(): - """return argumeents, this will overwrite the config after loading yaml file""" - parser = argparse.ArgumentParser('ViT') - parser.add_argument('-cfg', type=str, default=None) - parser.add_argument('-dataset', type=str, default=None) - parser.add_argument('-batch_size', type=int, default=None) - parser.add_argument('-image_size', type=int, default=None) - parser.add_argument('-data_path', type=str, default=None) - parser.add_argument('-output', type=str, default=None) - parser.add_argument('-ngpus', type=int, default=None) - parser.add_argument('-pretrained', type=str, default=None) - parser.add_argument('-resume', type=str, default=None) - parser.add_argument('-last_epoch', type=int, default=None) - parser.add_argument('-eval', action='store_true') - parser.add_argument('-mae_pretrain', action='store_true') - parser.add_argument('-amp', action='store_true') - arguments = parser.parse_args() - return arguments - - -def get_logger(filename, logger_name=None): - """set logging file and format - Args: - filename: str, full path of the logger file to write - logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' - Return: - logger: python logger - """ - log_format = "%(asctime)s %(message)s" - logging.basicConfig(stream=sys.stdout, level=logging.INFO, - format=log_format, datefmt="%m%d %I:%M:%S %p") - # different name is needed when creating multiple logger in one process - logger = logging.getLogger(logger_name) - fh = logging.FileHandler(os.path.join(filename)) - fh.setFormatter(logging.Formatter(log_format)) - logger.addHandler(fh) - return logger - - -def train(dataloader, - model, - criterion, - optimizer, - epoch, - total_epochs, - total_batch, - debug_steps=100, - accum_iter=1, - mixup_fn=None, - amp=False, - logger=None): - """Training for one epoch - Args: - dataloader: paddle.io.DataLoader, dataloader instance - model: nn.Layer, a ViT model - criterion: nn.criterion - epoch: int, current epoch - total_epochs: int, total num of epochs - total_batch: int, total num of batches for one epoch - debug_steps: int, num of iters to log info, default: 100 - accum_iter: int, num of iters for accumulating gradients, default: 1 - mixup_fn: Mixup, mixup 
instance, default: None - amp: bool, if True, use mix precision training, default: False - logger: logger for logging, default: None - Returns: - train_loss_meter.avg: float, average loss on current process/gpu - train_acc_meter.avg: float, average top1 accuracy on current process/gpu - train_time: float, training time - """ - model.train() - train_loss_meter = AverageMeter() - train_acc_meter = AverageMeter() - - if amp is True: - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - time_st = time.time() - - - for batch_id, data in enumerate(dataloader): - image = data[0] - label = data[1] - label_orig = label.clone() - - if mixup_fn is not None: - image, label = mixup_fn(image, label_orig) - - if amp is True: # mixed precision training - with paddle.amp.auto_cast(): - output = model(image) - loss = criterion(output, label) - scaled = scaler.scale(loss) - scaled.backward() - - if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - scaler.minimize(optimizer, scaled) - optimizer.clear_grad() - - else: - output = model(image) - loss = criterion(output, label) - # NOTE: division may be needed depending on the loss function - # Here no division is needed: - # default 'reduction' param in nn.CrossEntropyLoss is set to 'mean' - # loss = loss / accum_iter - loss.backward() - - if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - optimizer.step() - optimizer.clear_grad() - - pred = F.softmax(output) - if mixup_fn: - acc = paddle.metric.accuracy(pred, label_orig) - else: - acc = paddle.metric.accuracy(pred, label_orig.unsqueeze(1)) - - batch_size = image.shape[0] - train_loss_meter.update(loss.numpy()[0], batch_size) - train_acc_meter.update(acc.numpy()[0], batch_size) - - if logger and batch_id % debug_steps == 0: - logger.info( - f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {train_loss_meter.avg:.4f}, " + - f"Avg Acc: {train_acc_meter.avg:.4f}") - - train_time = time.time() - time_st - return train_loss_meter.avg, train_acc_meter.avg, train_time - - -def validate(dataloader, model, criterion, total_batch, debug_steps=100, logger=None): - """Validation for whole dataset - Args: - dataloader: paddle.io.DataLoader, dataloader instance - model: nn.Layer, a ViT model - criterion: nn.criterion - total_batch: int, total num of batches for one epoch - debug_steps: int, num of iters to log info, default: 100 - logger: logger for logging, default: None - Returns: - val_loss_meter.avg: float, average loss on current process/gpu - val_acc1_meter.avg: float, average top1 accuracy on current process/gpu - val_acc5_meter.avg: float, average top5 accuracy on current process/gpu - val_time: float, valitaion time - """ - model.eval() - val_loss_meter = AverageMeter() - val_acc1_meter = AverageMeter() - val_acc5_meter = AverageMeter() - time_st = time.time() - - with paddle.no_grad(): - for batch_id, data in enumerate(dataloader): - image = data[0] - label = data[1] - - output = model(image) - loss = criterion(output, label) - - pred = F.softmax(output) - acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)) - acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5) - - batch_size = image.shape[0] - val_loss_meter.update(loss.numpy()[0], batch_size) - val_acc1_meter.update(acc1.numpy()[0], batch_size) - val_acc5_meter.update(acc5.numpy()[0], batch_size) - - if logger and batch_id % debug_steps == 0: - logger.info( - f"Val Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: 
{val_loss_meter.avg:.4f}, " + - f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " + - f"Avg Acc@5: {val_acc5_meter.avg:.4f}") - - val_time = time.time() - time_st - return val_loss_meter.avg, val_acc1_meter.avg, val_acc5_meter.avg, val_time - - -def main(): - # 0. Preparation - # config is updated by: (1) config.py, (2) yaml file, (3) arguments - arguments = get_arguments() - config = get_config() - config = update_config(config, arguments) - # set output folder - if not config.EVAL: - config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - else: - config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - if not os.path.exists(config.SAVE): - os.makedirs(config.SAVE, exist_ok=True) - last_epoch = config.TRAIN.LAST_EPOCH - seed = config.SEED - paddle.seed(seed) - np.random.seed(seed) - random.seed(seed) - logger = get_logger(filename=os.path.join(config.SAVE, 'log.txt')) - logger.info(f'\n{config}') - - # 1. Create model - model = build_model(config) - # 2. Create train dataloader - dataset_train = get_dataset(config, mode='train') - dataset_val = get_dataset(config, mode='val') - dataloader_train = get_dataloader(config, dataset_train, 'train', False) - dataloader_val = get_dataloader(config, dataset_val, 'val', False) - # 3. Define Mixup function and criterion - mixup_fn = None - if config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or config.TRAIN.CUTMIX_MINMAX is not None: - mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA, - cutmix_alpha=config.TRAIN.CUTMIX_ALPHA, - cutmix_minmax=config.TRAIN.CUTMIX_MINMAX, - prob=config.TRAIN.MIXUP_PROB, - switch_prob=config.TRAIN.MIXUP_SWITCH_PROB, - mode=config.TRAIN.MIXUP_MODE, - label_smoothing=config.TRAIN.SMOOTHING, - num_classes=config.MODEL.NUM_CLASSES) - - if config.TRAIN.MIXUP_PROB > 0.: - criterion = SoftTargetCrossEntropyLoss() - elif config.TRAIN.SMOOTHING: - criterion = LabelSmoothingCrossEntropyLoss() - else: - criterion = nn.CrossEntropyLoss() - # only use cross entropy for val - criterion_val = nn.CrossEntropyLoss() - # 4. Define lr_scheduler - scheduler = None - if config.TRAIN.LR_SCHEDULER.NAME == "warmupcosine": - scheduler = WarmupCosineScheduler(learning_rate=config.TRAIN.BASE_LR, - warmup_start_lr=config.TRAIN.WARMUP_START_LR, - start_lr=config.TRAIN.BASE_LR, - end_lr=config.TRAIN.END_LR, - warmup_epochs=config.TRAIN.WARMUP_EPOCHS, - total_epochs=config.TRAIN.NUM_EPOCHS, - last_epoch=config.TRAIN.LAST_EPOCH, - ) - elif config.TRAIN.LR_SCHEDULER.NAME == "cosine": - scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=config.TRAIN.BASE_LR, - T_max=config.TRAIN.NUM_EPOCHS, - last_epoch=last_epoch) - elif config.scheduler == "multi-step": - milestones = [int(v.strip()) for v in config.TRAIN.LR_SCHEDULER.MILESTONES.split(",")] - scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=config.TRAIN.BASE_LR, - milestones=milestones, - gamma=config.TRAIN.LR_SCHEDULER.DECAY_RATE, - last_epoch=last_epoch) - else: - logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") - raise NotImplementedError(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") - # 5. 
Define optimizer - if config.TRAIN.OPTIMIZER.NAME == "SGD": - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None - optimizer = paddle.optimizer.Momentum( - parameters=model.parameters(), - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, - weight_decay=config.TRAIN.WEIGHT_DECAY, - momentum=config.TRAIN.OPTIMIZER.MOMENTUM, - grad_clip=clip) - elif config.TRAIN.OPTIMIZER.NAME == "AdamW": - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None - optimizer = paddle.optimizer.AdamW( - parameters=model.parameters(), - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, - weight_decay=config.TRAIN.WEIGHT_DECAY, - beta1=config.TRAIN.OPTIMIZER.BETAS[0], - beta2=config.TRAIN.OPTIMIZER.BETAS[1], - epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip) - else: - logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") - raise NotImplementedError(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") - # 6. Load pretrained model or load resume model and optimizer states - if config.MODEL.PRETRAINED: - if (config.MODEL.PRETRAINED).endswith('.pdparams'): - raise ValueError(f'{config.MODEL.PRETRAINED} should not contain .pdparams') - assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True - model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams') - model.set_dict(model_state) - logger.info(f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}") - - if config.MODEL.RESUME: - assert os.path.isfile(config.MODEL.RESUME + '.pdparams') is True - assert os.path.isfile(config.MODEL.RESUME + '.pdopt') is True - model_state = paddle.load(config.MODEL.RESUME + '.pdparams') - model.set_dict(model_state) - opt_state = paddle.load(config.MODEL.RESUME + '.pdopt') - optimizer.set_state_dict(opt_state) - logger.info( - f"----- Resume: Load model and optmizer from {config.MODEL.RESUME}") - - # STEP 7: Validation (eval mode) - if config.EVAL: - logger.info('----- Start Validating') - val_loss, val_acc1, val_acc5, val_time = validate( - dataloader=dataloader_val, - model=model, - criterion=criterion_val, - total_batch=len(dataloader_val), - debug_steps=config.REPORT_FREQ, - logger=logger) - logger.info(f"Validation Loss: {val_loss:.4f}, " + - f"Validation Acc@1: {val_acc1:.4f}, " + - f"Validation Acc@5: {val_acc5:.4f}, " + - f"time: {val_time:.2f}") - return - - # STEP 8: Start training and validation (train mode) - logger.info(f"Start training from epoch {last_epoch+1}.") - for epoch in range(last_epoch+1, config.TRAIN.NUM_EPOCHS+1): - # train - logger.info(f"Now training epoch {epoch}. 
LR={optimizer.get_lr():.6f}") - train_loss, train_acc, train_time = train(dataloader=dataloader_train, - model=model, - criterion=criterion, - optimizer=optimizer, - epoch=epoch, - total_epochs=config.TRAIN.NUM_EPOCHS, - total_batch=len(dataloader_train), - debug_steps=config.REPORT_FREQ, - accum_iter=config.TRAIN.ACCUM_ITER, - mixup_fn=mixup_fn, - amp=config.AMP, - logger=logger) - scheduler.step() - - logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {train_loss:.4f}, " + - f"Train Acc: {train_acc:.4f}, " + - f"time: {train_time:.2f}") - # validation - if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: - logger.info(f'----- Validation after Epoch: {epoch}') - val_loss, val_acc1, val_acc5, val_time = validate( - dataloader=dataloader_val, - model=model, - criterion=criterion_val, - total_batch=len(dataloader_val), - debug_steps=config.REPORT_FREQ, - logger=logger) - logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Validation Loss: {val_loss:.4f}, " + - f"Validation Acc@1: {val_acc1:.4f}, " + - f"Validation Acc@5: {val_acc5:.4f}, " + - f"time: {val_time:.2f}") - # model save - if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: - model_path = os.path.join( - config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}") - paddle.save(model.state_dict(), model_path + '.pdparams') - paddle.save(optimizer.state_dict(), model_path + '.pdopt') - logger.info(f"----- Save model: {model_path}.pdparams") - logger.info(f"----- Save optim: {model_path}.pdopt") - - -if __name__ == "__main__": - main() diff --git a/image_classification/MAE/main_single_gpu_pretrain.py b/image_classification/MAE/main_single_gpu_pretrain.py deleted file mode 100644 index cf315a42..00000000 --- a/image_classification/MAE/main_single_gpu_pretrain.py +++ /dev/null @@ -1,308 +0,0 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""MAE pre-training using single GPU, this is just a demo, we recommand using multi-gpu version""" - -import sys -import os -import time -import logging -import argparse -import random -import numpy as np -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -from datasets import get_dataloader -from datasets import get_dataset -from transformer import build_mae_pretrain as build_model -from utils import AverageMeter -from utils import WarmupCosineScheduler -from config import get_config -from config import update_config - - -def get_arguments(): - """return argumeents, this will overwrite the config after loading yaml file""" - parser = argparse.ArgumentParser('MAE') - parser.add_argument('-cfg', type=str, default=None) - parser.add_argument('-dataset', type=str, default=None) - parser.add_argument('-batch_size', type=int, default=None) - parser.add_argument('-image_size', type=int, default=None) - parser.add_argument('-data_path', type=str, default=None) - parser.add_argument('-output', type=str, default=None) - parser.add_argument('-ngpus', type=int, default=None) - parser.add_argument('-pretrained', type=str, default=None) - parser.add_argument('-resume', type=str, default=None) - parser.add_argument('-last_epoch', type=int, default=None) - parser.add_argument('-eval', action='store_true') - parser.add_argument('-mae_pretrain', action='store_true') - parser.add_argument('-amp', action='store_true') - arguments = parser.parse_args() - return arguments - - -def get_logger(filename, logger_name=None): - """set logging file and format - Args: - filename: str, full path of the logger file to write - logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' - Return: - logger: python logger - """ - log_format = "%(asctime)s %(message)s" - logging.basicConfig(stream=sys.stdout, level=logging.INFO, - format=log_format, datefmt="%m%d %I:%M:%S %p") - # different name is needed when creating multiple logger in one process - logger = logging.getLogger(logger_name) - fh = logging.FileHandler(os.path.join(filename)) - fh.setFormatter(logging.Formatter(log_format)) - logger.addHandler(fh) - return logger - - -def train(dataloader, - patch_size, - model, - criterion, - optimizer, - epoch, - total_epochs, - total_batch, - normalize_target=True, - debug_steps=100, - accum_iter=1, - amp=False, - logger=None): - """Training for one epoch - Args: - dataloader: paddle.io.DataLoader, dataloader instance - model: nn.Layer, a ViT model - criterion: nn.criterion - epoch: int, current epoch - total_epochs: int, total num of epochs - total_batch: int, total num of batches for one epoch - debug_steps: int, num of iters to log info, default: 100 - accum_iter: int, num of iters for accumulating gradients, default: 1 - amp: bool, if True, use mix precision training, default: False - logger: logger for logging, default: None - Returns: - train_loss_meter.avg: float, average loss on current process/gpu - train_time: float, training time - """ - model.train() - train_loss_meter = AverageMeter() - - if amp is True: - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - time_st = time.time() - - for batch_id, data in enumerate(dataloader): - images = data[0] - masks = paddle.to_tensor(data[1], dtype='bool') - - with paddle.no_grad(): - mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape([1, 3, 1, 1]) - std = paddle.to_tensor([0.229, 0.224, 0.225]).reshape([1, 3, 1, 1]) - unnorm_images = images * std + mean - B, C, H, W = images.shape - if normalize_target: - images_patch = 
unnorm_images.reshape([B, C, H // patch_size, patch_size, W // patch_size, patch_size]) - images_patch = images_patch.transpose([0, 2, 4, 3, 5, 1]) - images_patch = images_patch.reshape([B, -1, patch_size * patch_size, C]) - images_patch = (images_patch - images_patch.mean(axis=-2, keepdim=True)) / ( - images_patch.var(axis=-2, keepdim=True).sqrt() + 1e-6) - images_patch = images_patch.flatten(-2) - else: - images_patch = unnorm_images.reshape([B, C, H//patch_size, patch_size, W//patch_size, patch_size]) - images_patch = images_patch.transpose([0, 2, 4, 3, 5, 1]) - images_patch = images_patch.reshape([B, -1, patch_size * patch_size, C]) - images_patch = images_patch.flatten(-2) - - B, _, C = images_patch.shape - labels = images_patch[masks[:, 1:]].reshape([B, -1, C]) - - if amp is True: - with paddle.amp.auto_cast(): - reconstructed_patches = model(images, masks) - loss = criterion(reconstructed_patches, labels) - scaled = scaler.scale(loss) - scaled.backward() - - if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - scaler.minimize(optimizer, scaled) - optimizer.clear_grad() - else: - reconstructed_patches = model(images, masks) - loss = criterion(reconstructed_patches, labels) - # NOTE: division may be needed depending on the loss function - # Here no division is needed: - # default 'reduction' param in nn.CrossEntropyLoss is set to 'mean' - # loss = loss / accum_iter - loss.backward() - - if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - optimizer.step() - optimizer.clear_grad() - - batch_size = images.shape[0] - train_loss_meter.update(loss.numpy()[0], batch_size) - - if logger and batch_id % debug_steps == 0: - logger.info( - f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {train_loss_meter.avg:.4f}") - - train_time = time.time() - time_st - return train_loss_meter.avg, train_time - - -def main(): - # 0. Preparation - # config is updated by: (1) config.py, (2) yaml file, (3) arguments - arguments = get_arguments() - config = get_config() - config = update_config(config, arguments) - # set output folder - if not config.EVAL: - config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - else: - config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - if not os.path.exists(config.SAVE): - os.makedirs(config.SAVE, exist_ok=True) - last_epoch = config.TRAIN.LAST_EPOCH - seed = config.SEED - paddle.seed(seed) - np.random.seed(seed) - random.seed(seed) - logger = get_logger(filename=os.path.join(config.SAVE, 'log.txt')) - logger.info(f'\n{config}') - - # 1. Create model - model = build_model(config) - # 2. Create train dataloader - dataset_train = get_dataset(config, mode='train') - dataloader_train = get_dataloader(config, dataset_train, 'train', False) - # 3. Define criterion - criterion = nn.MSELoss() - # 4. 
Define lr_scheduler - scheduler = None - if config.TRAIN.LR_SCHEDULER.NAME == "warmupcosine": - scheduler = WarmupCosineScheduler(learning_rate=config.TRAIN.BASE_LR, - warmup_start_lr=config.TRAIN.WARMUP_START_LR, - start_lr=config.TRAIN.BASE_LR, - end_lr=config.TRAIN.END_LR, - warmup_epochs=config.TRAIN.WARMUP_EPOCHS, - total_epochs=config.TRAIN.NUM_EPOCHS, - last_epoch=config.TRAIN.LAST_EPOCH, - ) - elif config.TRAIN.LR_SCHEDULER.NAME == "cosine": - scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=config.TRAIN.BASE_LR, - T_max=config.TRAIN.NUM_EPOCHS, - last_epoch=last_epoch) - elif config.scheduler == "multi-step": - milestones = [int(v.strip()) for v in config.TRAIN.LR_SCHEDULER.MILESTONES.split(",")] - scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=config.TRAIN.BASE_LR, - milestones=milestones, - gamma=config.TRAIN.LR_SCHEDULER.DECAY_RATE, - last_epoch=last_epoch) - else: - logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") - raise NotImplementedError(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.") - # 5. Define optimizer - if config.TRAIN.OPTIMIZER.NAME == "SGD": - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None - optimizer = paddle.optimizer.Momentum( - parameters=model.parameters(), - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, - weight_decay=config.TRAIN.WEIGHT_DECAY, - momentum=config.TRAIN.OPTIMIZER.MOMENTUM, - grad_clip=clip) - elif config.TRAIN.OPTIMIZER.NAME == "AdamW": - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None - optimizer = paddle.optimizer.AdamW( - parameters=model.parameters(), - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, - weight_decay=config.TRAIN.WEIGHT_DECAY, - beta1=config.TRAIN.OPTIMIZER.BETAS[0], - beta2=config.TRAIN.OPTIMIZER.BETAS[1], - epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip) - else: - logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") - raise NotImplementedError(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.") - # 6. Load pretrained model or load resume model and optimizer states - if config.MODEL.PRETRAINED: - if (config.MODEL.PRETRAINED).endswith('.pdparams'): - raise ValueError(f'{config.MODEL.PRETRAINED} should not contain .pdparams') - assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True - model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams') - model.set_dict(model_state) - logger.info(f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}") - - if config.MODEL.RESUME: - assert os.path.isfile(config.MODEL.RESUME + '.pdparams') is True - assert os.path.isfile(config.MODEL.RESUME + '.pdopt') is True - model_state = paddle.load(config.MODEL.RESUME + '.pdparams') - model.set_dict(model_state) - opt_state = paddle.load(config.MODEL.RESUME + '.pdopt') - optimizer.set_state_dict(opt_state) - logger.info( - f"----- Resume: Load model and optmizer from {config.MODEL.RESUME}") - - # 7. Start training and validation - logging.info(f"Start training from epoch {last_epoch + 1}.") - for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): - # train - logging.info(f"Now training epoch {epoch}. 
LR={optimizer.get_lr():.6f}") - train_loss, train_time = train(dataloader=dataloader_train, - patch_size=config.MODEL.TRANS.PATCH_SIZE, - model=model, - criterion=criterion, - optimizer=optimizer, - epoch=epoch, - total_epochs=config.TRAIN.NUM_EPOCHS, - total_batch=len(dataloader_train), - normalize_target=config.TRAIN.NORMALIZE_TARGET, - debug_steps=config.REPORT_FREQ, - accum_iter=config.TRAIN.ACCUM_ITER, - amp=config.AMP, - logger=logger) - scheduler.step() - - logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {train_loss:.4f}, " + - f"time: {train_time:.2f}") - # validation - # No need to do validation during pretraining - - # model save - if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: - model_path = os.path.join( - config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}") - paddle.save(model.state_dict(), model_path + '.pdparams') - paddle.save(optimizer.state_dict(), model_path + '.pdopt') - logger.info(f"----- Save model: {model_path}.pdparams") - logger.info(f"----- Save optim: {model_path}.pdopt") - - -if __name__ == "__main__": - main() diff --git a/image_classification/MAE/masking_generator.py b/image_classification/MAE/masking_generator.py deleted file mode 100644 index 9271dd4e..00000000 --- a/image_classification/MAE/masking_generator.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -random mask generator for MAE pretraining -""" - -import random -import math -import numpy as np - -class RandomMaskingGenerator: - def __init__(self, input_size, mask_ratio, with_cls_token=True): - if not isinstance(input_size, tuple): - input_size = (input_size, ) * 2 - - self.height = input_size[0] - self.width = input_size[1] - self.num_patches = self.height * self.width - self.num_mask = int(mask_ratio * self.num_patches) - self.with_cls_token = with_cls_token - - def __call__(self): - mask = np.hstack([np.zeros(self.num_patches - self.num_mask), - np.ones(self.num_mask)]) - np.random.shuffle(mask) - if self.with_cls_token: - mask = np.insert(mask, 0, 0) - return mask - - -#def main(): -# rmg = RandomMaskingGenerator(input_size=32, mask_ratio=0.75) -# mask = rmg() -# for v in mask: -# print(v, end=', ') -# -#if __name__ == "__main__": -# main() diff --git a/image_classification/MAE/nohup.out b/image_classification/MAE/nohup.out deleted file mode 100644 index 6e00dda7..00000000 --- a/image_classification/MAE/nohup.out +++ /dev/null @@ -1,9507 +0,0 @@ -Traceback (most recent call last): - File "main_multi_gpu_pretrain.py", line 24, in - import paddle - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/__init__.py", line 25, in - from .fluid import monkey_patch_variable - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/__init__.py", line 45, in - from . 
import dataset - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dataset.py", line 19, in - from ..utils import deprecated - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/utils/__init__.py", line 26, in - from . import download # noqa: F401 - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/utils/download.py", line 23, in - import requests - File "/opt/conda/envs/py36/lib/python3.6/site-packages/requests/__init__.py", line 112, in - from . import utils - File "/opt/conda/envs/py36/lib/python3.6/site-packages/requests/utils.py", line 24, in - from . import certs - File "", line 971, in _find_and_load - File "", line 955, in _find_and_load_unlocked - File "", line 665, in _load_unlocked - File "", line 674, in exec_module - File "", line 764, in get_code - File "", line 833, in get_data -KeyboardInterrupt -merging config from ./configs/vit_base_patch16_224_pretrain_dec1.yaml ------ Imagenet2012 image train list len = 1281167 -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:30053', '127.0.0.1:54949', '127.0.0.1:41862', '127.0.0.1:28777', '127.0.0.1:55177', '127.0.0.1:18423', '127.0.0.1:46681'] -I1219 16:59:41.631045 23562 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:30053 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:54949', '127.0.0.1:41862', '127.0.0.1:28777', '127.0.0.1:55177', '127.0.0.1:18423', '127.0.0.1:46681'] -I1219 16:59:44.247634 23580 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:54949 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:41862', '127.0.0.1:28777', '127.0.0.1:55177', '127.0.0.1:18423', '127.0.0.1:46681'] -I1219 16:59:46.636570 23595 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:41862 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:28777', '127.0.0.1:55177', '127.0.0.1:18423', '127.0.0.1:46681'] -I1219 16:59:48.816335 23610 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:28777 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:55177', '127.0.0.1:18423', '127.0.0.1:46681'] -I1219 16:59:51.517431 23627 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:55177 successful. -I1219 16:59:53.801396 23642 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:18423 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:46681'] -I1219 16:59:56.182962 23659 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:46681 successful. 
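For context on what the removed masking_generator.py and the normalize-target branch of the old pretraining loop computed, here is a condensed NumPy sketch (shapes and helper names are illustrative, not code from this patch): the mask hides a fixed ratio of patches, and the reconstruction target is each patch normalized by its own mean and standard deviation.

    import numpy as np

    def random_mask(num_patches, mask_ratio, with_cls_token=True):
        # same idea as the removed RandomMaskingGenerator: 1 = masked, 0 = visible
        num_mask = int(mask_ratio * num_patches)
        mask = np.hstack([np.zeros(num_patches - num_mask), np.ones(num_mask)])
        np.random.shuffle(mask)
        if with_cls_token:
            mask = np.insert(mask, 0, 0)  # the cls token position is never masked
        return mask.astype(bool)

    def normalized_pixel_target(patches, eps=1e-6):
        # patches: [num_patches, patch_size * patch_size, channels]
        # per-patch normalization of the regression target, as in the removed loop
        mean = patches.mean(axis=-2, keepdims=True)
        std = np.sqrt(patches.var(axis=-2, keepdims=True)) + eps
        return ((patches - mean) / std).reshape(patches.shape[0], -1)

    mask = random_mask(num_patches=196, mask_ratio=0.75)  # 14 x 14 grid for 224 / 16
    print(int(mask.sum()), "of", mask.size - 1, "patches masked")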
-I1219 16:59:56.935767 23580 nccl_context.cc:74] init nccl context nranks: 8 local rank: 2 gpu id: 2 ring id: 0 -I1219 16:59:56.935765 23562 nccl_context.cc:74] init nccl context nranks: 8 local rank: 1 gpu id: 1 ring id: 0 -I1219 16:59:56.935781 23627 nccl_context.cc:74] init nccl context nranks: 8 local rank: 5 gpu id: 5 ring id: 0 -I1219 16:59:56.935775 23595 nccl_context.cc:74] init nccl context nranks: 8 local rank: 3 gpu id: 3 ring id: 0 -I1219 16:59:56.935791 23642 nccl_context.cc:74] init nccl context nranks: 8 local rank: 6 gpu id: 6 ring id: 0 -I1219 16:59:56.935806 23610 nccl_context.cc:74] init nccl context nranks: 8 local rank: 4 gpu id: 4 ring id: 0 -I1219 16:59:56.935818 23659 nccl_context.cc:74] init nccl context nranks: 8 local rank: 7 gpu id: 7 ring id: 0 -I1219 16:59:56.935837 23545 nccl_context.cc:74] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 0 -W1219 17:00:00.904070 23545 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:00:00.904078 23562 device_context.cc:447] Please NOTE: device: 1, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:00:00.904153 23595 device_context.cc:447] Please NOTE: device: 3, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:00:00.904173 23610 device_context.cc:447] Please NOTE: device: 4, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:00:00.904186 23659 device_context.cc:447] Please NOTE: device: 7, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:00:00.904246 23642 device_context.cc:447] Please NOTE: device: 6, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:00:00.904264 23627 device_context.cc:447] Please NOTE: device: 5, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:00:00.906248 23580 device_context.cc:447] Please NOTE: device: 2, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:00:00.957355 23562 device_context.cc:465] device: 1, cuDNN Version: 7.6. -W1219 17:00:00.957355 23659 device_context.cc:465] device: 7, cuDNN Version: 7.6. -W1219 17:00:00.957358 23595 device_context.cc:465] device: 3, cuDNN Version: 7.6. -W1219 17:00:00.957360 23545 device_context.cc:465] device: 0, cuDNN Version: 7.6. -W1219 17:00:00.957374 23610 device_context.cc:465] device: 4, cuDNN Version: 7.6. -W1219 17:00:00.957383 23642 device_context.cc:465] device: 6, cuDNN Version: 7.6. -W1219 17:00:00.957394 23580 device_context.cc:465] device: 2, cuDNN Version: 7.6. -W1219 17:00:00.957394 23627 device_context.cc:465] device: 5, cuDNN Version: 7.6. 
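The NCCL and device lines above are the normal startup chatter from paddle.distributed.spawn launching one worker per GPU, which is also visible in the tracebacks further down (dist.spawn(main_worker, ..., nprocs=config.NGPUS)). A minimal, self-contained sketch of that launch pattern, with a toy layer standing in for the MAE model:

    import paddle
    import paddle.distributed as dist

    def main_worker(*args):
        # one process per GPU; DataParallel wraps the model for gradient all-reduce
        dist.init_parallel_env()
        model = paddle.DataParallel(paddle.nn.Linear(8, 8))
        print(f"worker {dist.get_rank()} of {dist.get_world_size()} ready")

    if __name__ == "__main__":
        dist.spawn(main_worker, args=(), nprocs=2)  # nprocs=config.NGPUS in the real script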
-INFO:local_logger:----- world_size = 8, local_rank = 6 -INFO:local_logger:----- world_size = 8, local_rank = 3 -INFO:master_logger: -AMP: False -BASE: [''] -DATA: - BATCH_SIZE: 256 - BATCH_SIZE_EVAL: 8 - CROP_PCT: 0.875 - DATASET: imagenet2012 - DATA_PATH: /dataset/imagenet - IMAGE_SIZE: 224 - NUM_WORKERS: 4 -EVAL: False -LOCAL_RANK: 0 -MODEL: - ATTENTION_DROPOUT: 0.1 - DROPOUT: 0.1 - DROPPATH: 0.0 - MAE_PRETRAIN: True - NAME: vit_base_patch16_224_dec1 - NUM_CLASSES: 1000 - PRETRAINED: None - RESUME: None - TRANS: - DECODER: - DEPTH: 1 - EMBED_DIM: 512 - NUM_HEADS: 8 - ENCODER: - DEPTH: 12 - EMBED_DIM: 768 - NUM_HEADS: 12 - MASK_RATIO: 0.75 - MLP_RATIO: 4.0 - PATCH_SIZE: 16 - QKV_BIAS: True - TYPE: MAE -NGPUS: 8 -REPORT_FREQ: 100 -SAVE: ./output/train-20211219-16-59-32 -SAVE_FREQ: 1 -SEED: 0 -TAG: default -TRAIN: - ACCUM_ITER: 2 - BASE_LR: 0.00015 - CUTMIX_ALPHA: 1.0 - CUTMIX_MINMAX: None - END_LR: 0.0005 - GRAD_CLIP: 1 - LAST_EPOCH: 0 - LINEAR_SCALED_LR: None - LR_SCHEDULER: - DECAY_EPOCHS: 30 - DECAY_RATE: 0.1 - MILESTONES: 30, 60, 90 - NAME: warmupcosine - MIXUP_ALPHA: 0.8 - MIXUP_MODE: batch - MIXUP_PROB: 1.0 - MIXUP_SWITCH_PROB: 0.5 - NORMALIZE_TARGET: True - NUM_EPOCHS: 800 - OPTIMIZER: - BETAS: (0.9, 0.95) - EPS: 1e-08 - MOMENTUM: 0.9 - NAME: AdamW - RAND_AUGMENT: False - RAND_AUGMENT_LAYERS: 9 - RAND_AUGMENT_MAGNITUDE: 5 - SMOOTHING: 0.1 - WARMUP_EPOCHS: 40 - WARMUP_START_LR: 1e-06 - WEIGHT_DECAY: 0.05 -VALIDATE_FREQ: 100 -INFO:local_logger:----- world_size = 8, local_rank = 0 -INFO:master_logger:----- world_size = 8, local_rank = 0 -INFO:local_logger:----- world_size = 8, local_rank = 7 -INFO:local_logger:----- world_size = 8, local_rank = 5 -INFO:local_logger:----- world_size = 8, local_rank = 1 -INFO:local_logger:----- world_size = 8, local_rank = 2 -INFO:local_logger:----- world_size = 8, local_rank = 4 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:master_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:master_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:master_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -ERROR: Unexpected BUS error encountered in DataLoader worker. 
This might be caused by insufficient shared memory (shm), please check whether use_shared_memory is set and storage space in /dev/shm is enough -ERROR: Unexpected BUS error encountered in DataLoader worker. This might be caused by insufficient shared memory (shm), please check whether use_shared_memory is set and storage space in /dev/shm is enough -ERROR: Unexpected BUS error encountered in DataLoader worker. This might be caused by insufficient shared memory (shm), please check whether use_shared_memory is set and storage space in /dev/shm is enough -ERROR: Unexpected BUS error encountered in DataLoader worker. This might be caused by insufficient shared memory (shm), please check whether use_shared_memory is set and storage space in /dev/shm is enough -ERROR: Unexpected BUS error encountered in DataLoader worker. This might be caused by insufficient shared memory (shm), please check whether use_shared_memory is set and storage space in /dev/shm is enough -Exception in thread Thread-1: -Traceback (most recent call last): - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dataloader/dataloader_iter.py", line 583, in _get_data - data = self._data_queue.get(timeout=self._timeout) - File "/opt/conda/envs/py36/lib/python3.6/multiprocessing/queues.py", line 105, in get - raise Empty -queue.Empty - -During handling of the above exception, another exception occurred: - -Traceback (most recent call last): - File "/opt/conda/envs/py36/lib/python3.6/threading.py", line 916, in _bootstrap_inner - self.run() - File "/opt/conda/envs/py36/lib/python3.6/threading.py", line 864, in run - self._target(*self._args, **self._kwargs) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dataloader/dataloader_iter.py", line 505, in _thread_loop - batch = self._get_data() - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dataloader/dataloader_iter.py", line 599, in _get_data - "pids: {}".format(len(failed_workers), pids)) -RuntimeError: DataLoader 1 workers exit unexpectedly, pids: 23832 - - - --------------------------------------- -C++ Traceback (most recent call last): --------------------------------------- -No stack trace in paddle, may be caused by external reasons. - ----------------------- -Error Message Summary: ----------------------- -FatalError: `Termination signal` is detected by the operating system. 
- [TimeInfo: *** Aborted at 1639904442 (unix time) try "date -d @1639904442" if you are using GNU date ***] - [SignalInfo: *** SIGTERM (@0x5be5) received by PID 23545 (TID 0x7f5dda7df700) from PID 23525 ***] - -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 20 leaked semaphores to clean up at shutdown - len(cache)) -Traceback (most recent call last): - File "main_multi_gpu_pretrain.py", line 416, in - main() - File "main_multi_gpu_pretrain.py", line 412, in main - dist.spawn(main_worker, args=(config, dataset_train, ), nprocs=config.NGPUS) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 502, in spawn - while not context.join(): - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 312, in join - self._throw_exception(error_index) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 330, in _throw_exception - raise Exception(msg) -Exception: - ----------------------------------------------- -Process 3 terminated with the following error: ----------------------------------------------- - -Traceback (most recent call last): - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 261, in _func_wrapper - result = func(*args) - File "/workspace/ppvit_github/PaddleViT_raw/PaddleViT/image_classification/MAE/main_multi_gpu_pretrain.py", line 368, in main_worker - master_logger=master_logger) - File "/workspace/ppvit_github/PaddleViT_raw/PaddleViT/image_classification/MAE/main_multi_gpu_pretrain.py", line 157, in train - reconstructed_patches = model(images, masks) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dygraph/layers.py", line 914, in __call__ - outputs = self.forward(*inputs, **kwargs) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dygraph/parallel.py", line 695, in forward - outputs = self._layers(*inputs, **kwargs) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dygraph/layers.py", line 914, in __call__ - outputs = self.forward(*inputs, **kwargs) - File "/workspace/ppvit_github/PaddleViT_raw/PaddleViT/image_classification/MAE/transformer.py", line 537, in forward - enc_out = self.encoder(no_mask_x) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dygraph/layers.py", line 914, in __call__ - outputs = self.forward(*inputs, **kwargs) - File "/workspace/ppvit_github/PaddleViT_raw/PaddleViT/image_classification/MAE/transformer.py", line 364, in forward - x = layer(x) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dygraph/layers.py", line 914, in __call__ - outputs = self.forward(*inputs, **kwargs) - File "/workspace/ppvit_github/PaddleViT_raw/PaddleViT/image_classification/MAE/transformer.py", line 310, in forward - x = self.mlp(x) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dygraph/layers.py", line 914, in __call__ - outputs = self.forward(*inputs, **kwargs) - File "/workspace/ppvit_github/PaddleViT_raw/PaddleViT/image_classification/MAE/transformer.py", line 245, in forward - x = self.fc1(x) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dygraph/layers.py", line 914, in __call__ - outputs = self.forward(*inputs, **kwargs) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/nn/layer/common.py", line 172, in forward - x=input, weight=self.weight, bias=self.bias, name=self.name) - File 
"/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/nn/functional/common.py", line 1474, in linear - False) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/multiprocess_utils.py", line 134, in __handler__ - core._throw_error_if_process_failed() -SystemError: (Fatal) DataLoader process (pid 1. If run DataLoader by DataLoader.from_generator(...), queue capacity is set by from_generator(..., capacity=xx, ...). - 2. If run DataLoader by DataLoader(dataset, ...), queue capacity is set as 2 times of the max value of num_workers and len(places). - 3. If run by DataLoader(dataset, ..., use_shared_memory=True), set use_shared_memory=False for not using shared memory.) exited is killed by signal: 23723. - It may be caused by insufficient shared storage space. This problem usually occurs when using docker as a development environment. - Please use command `df -h` to check the storage space of `/dev/shm`. Shared storage space needs to be greater than (DataLoader Num * DataLoader queue capacity * 1 batch data size). - You can solve this problem by increasing the shared storage space or reducing the queue capacity appropriately. -Bus error (at /paddle/paddle/fluid/imperative/data_loader.cc:177) - - -merging config from ./configs/vit_base_patch16_224_pretrain_dec1.yaml ------ Imagenet2012 image train list len = 1281167 -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:58819', '127.0.0.1:34756', '127.0.0.1:44071', '127.0.0.1:12661', '127.0.0.1:44311', '127.0.0.1:14139', '127.0.0.1:51679'] -I1219 17:02:09.309500 24382 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:58819 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:34756', '127.0.0.1:44071', '127.0.0.1:12661', '127.0.0.1:44311', '127.0.0.1:14139', '127.0.0.1:51679'] -I1219 17:02:11.901250 24397 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:34756 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:44071', '127.0.0.1:12661', '127.0.0.1:44311', '127.0.0.1:14139', '127.0.0.1:51679'] -I1219 17:02:14.341609 24414 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:44071 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:12661', '127.0.0.1:44311', '127.0.0.1:14139', '127.0.0.1:51679'] -I1219 17:02:17.001890 24429 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:12661 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:44311', '127.0.0.1:14139', '127.0.0.1:51679'] -I1219 17:02:19.379423 24447 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:44311 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:14139', '127.0.0.1:51679'] -I1219 17:02:22.029084 24463 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:14139 successful. -I1219 17:02:24.569348 24481 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:51679 successful. 
-I1219 17:02:24.931157 24382 nccl_context.cc:74] init nccl context nranks: 8 local rank: 1 gpu id: 1 ring id: 0 -I1219 17:02:24.931161 24397 nccl_context.cc:74] init nccl context nranks: 8 local rank: 2 gpu id: 2 ring id: 0 -I1219 17:02:24.931192 24414 nccl_context.cc:74] init nccl context nranks: 8 local rank: 3 gpu id: 3 ring id: 0 -I1219 17:02:24.931200 24429 nccl_context.cc:74] init nccl context nranks: 8 local rank: 4 gpu id: 4 ring id: 0 -I1219 17:02:24.931208 24447 nccl_context.cc:74] init nccl context nranks: 8 local rank: 5 gpu id: 5 ring id: 0 -I1219 17:02:24.931213 24463 nccl_context.cc:74] init nccl context nranks: 8 local rank: 6 gpu id: 6 ring id: 0 -I1219 17:02:24.931216 24481 nccl_context.cc:74] init nccl context nranks: 8 local rank: 7 gpu id: 7 ring id: 0 -I1219 17:02:24.931238 24365 nccl_context.cc:74] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 0 -W1219 17:02:28.374552 24365 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:02:28.374681 24397 device_context.cc:447] Please NOTE: device: 2, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:02:28.374711 24414 device_context.cc:447] Please NOTE: device: 3, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:02:28.374712 24429 device_context.cc:447] Please NOTE: device: 4, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:02:28.374729 24447 device_context.cc:447] Please NOTE: device: 5, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:02:28.374773 24382 device_context.cc:447] Please NOTE: device: 1, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:02:28.374810 24463 device_context.cc:447] Please NOTE: device: 6, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:02:28.376953 24481 device_context.cc:447] Please NOTE: device: 7, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:02:28.382552 24414 device_context.cc:465] device: 3, cuDNN Version: 7.6. -W1219 17:02:28.382556 24365 device_context.cc:465] device: 0, cuDNN Version: 7.6. -W1219 17:02:28.382561 24447 device_context.cc:465] device: 5, cuDNN Version: 7.6. -W1219 17:02:28.382565 24397 device_context.cc:465] device: 2, cuDNN Version: 7.6. -W1219 17:02:28.382582 24463 device_context.cc:465] device: 6, cuDNN Version: 7.6. -W1219 17:02:28.382568 24429 device_context.cc:465] device: 4, cuDNN Version: 7.6. -W1219 17:02:28.382580 24382 device_context.cc:465] device: 1, cuDNN Version: 7.6. -W1219 17:02:28.382681 24481 device_context.cc:465] device: 7, cuDNN Version: 7.6. 
-INFO:local_logger:----- world_size = 8, local_rank = 1 -INFO:local_logger:----- world_size = 8, local_rank = 5 -INFO:local_logger:----- world_size = 8, local_rank = 3 -INFO:local_logger:----- world_size = 8, local_rank = 2 -INFO:local_logger:----- world_size = 8, local_rank = 7 -INFO:local_logger:----- world_size = 8, local_rank = 6 -INFO:master_logger: -AMP: False -BASE: [''] -DATA: - BATCH_SIZE: 256 - BATCH_SIZE_EVAL: 8 - CROP_PCT: 0.875 - DATASET: imagenet2012 - DATA_PATH: /dataset/imagenet - IMAGE_SIZE: 224 - NUM_WORKERS: 4 -EVAL: False -LOCAL_RANK: 0 -MODEL: - ATTENTION_DROPOUT: 0.1 - DROPOUT: 0.1 - DROPPATH: 0.0 - MAE_PRETRAIN: True - NAME: vit_base_patch16_224_dec1 - NUM_CLASSES: 1000 - PRETRAINED: None - RESUME: None - TRANS: - DECODER: - DEPTH: 1 - EMBED_DIM: 512 - NUM_HEADS: 8 - ENCODER: - DEPTH: 12 - EMBED_DIM: 768 - NUM_HEADS: 12 - MASK_RATIO: 0.75 - MLP_RATIO: 4.0 - PATCH_SIZE: 16 - QKV_BIAS: True - TYPE: MAE -NGPUS: 8 -REPORT_FREQ: 100 -SAVE: ./output/train-20211219-17-02-00 -SAVE_FREQ: 1 -SEED: 0 -TAG: default -TRAIN: - ACCUM_ITER: 2 - BASE_LR: 0.00015 - CUTMIX_ALPHA: 1.0 - CUTMIX_MINMAX: None - END_LR: 0.0005 - GRAD_CLIP: 1 - LAST_EPOCH: 0 - LINEAR_SCALED_LR: None - LR_SCHEDULER: - DECAY_EPOCHS: 30 - DECAY_RATE: 0.1 - MILESTONES: 30, 60, 90 - NAME: warmupcosine - MIXUP_ALPHA: 0.8 - MIXUP_MODE: batch - MIXUP_PROB: 1.0 - MIXUP_SWITCH_PROB: 0.5 - NORMALIZE_TARGET: True - NUM_EPOCHS: 800 - OPTIMIZER: - BETAS: (0.9, 0.95) - EPS: 1e-08 - MOMENTUM: 0.9 - NAME: AdamW - RAND_AUGMENT: False - RAND_AUGMENT_LAYERS: 9 - RAND_AUGMENT_MAGNITUDE: 5 - SMOOTHING: 0.1 - WARMUP_EPOCHS: 40 - WARMUP_START_LR: 1e-06 - WEIGHT_DECAY: 0.05 -VALIDATE_FREQ: 100 -INFO:local_logger:----- world_size = 8, local_rank = 0 -INFO:master_logger:----- world_size = 8, local_rank = 0 -INFO:local_logger:----- world_size = 8, local_rank = 4 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:master_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:master_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:master_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. 
LR=0.000005 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1452 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1431 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1469 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1481 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1408 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1501 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1475 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1440 -INFO:master_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1457 - - --------------------------------------- -C++ Traceback (most recent call last): --------------------------------------- -No stack trace in paddle, may be caused by external reasons. - ----------------------- -Error Message Summary: ----------------------- -FatalError: `Termination signal` is detected by the operating system. - [TimeInfo: *** Aborted at 1639904603 (unix time) try "date -d @1639904603" if you are using GNU date ***] - [SignalInfo: *** SIGTERM (@0x5f17) received by PID 24365 (TID 0x7f5d5ca46700) from PID 24343 ***] - -Traceback (most recent call last): - File "main_multi_gpu_pretrain.py", line 416, in - main() - File "main_multi_gpu_pretrain.py", line 412, in main - dist.spawn(main_worker, args=(config, dataset_train, ), nprocs=config.NGPUS) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 502, in spawn - while not context.join(): - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 312, in join - self._throw_exception(error_index) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 330, in _throw_exception - raise Exception(msg) -Exception: - ----------------------------------------------- -Process 1 terminated with the following error: ----------------------------------------------- - -Traceback (most recent call last): - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 261, in _func_wrapper - result = func(*args) - File "/workspace/ppvit_github/PaddleViT_raw/PaddleViT/image_classification/MAE/main_multi_gpu_pretrain.py", line 368, in main_worker - master_logger=master_logger) - File "/workspace/ppvit_github/PaddleViT_raw/PaddleViT/image_classification/MAE/main_multi_gpu_pretrain.py", line 163, in train - loss.backward() - File "/opt/conda/envs/py36/lib/python3.6/site-packages/decorator.py", line 232, in fun - return caller(func, *(extras + args), **kw) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/wrapped_decorator.py", line 25, in __impl__ - return wrapped_func(*args, **kwargs) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/framework.py", line 229, in __impl__ - return func(*args, **kwargs) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/fluid/dygraph/varbase_patch_methods.py", line 239, in backward - framework._dygraph_tracer()) -OSError: (External) ResourceExhaustedError: - -Out of memory error on GPU 1. Cannot allocate 394.000244MB memory on GPU 1, 15.719788GB memory has been allocated and available memory is only 63.437500MB. - -Please check whether there is any other process using GPU 1. -1. If yes, please stop them, or start PaddlePaddle on another GPU. -2. If no, please decrease the batch size of your model. 
- - (at /paddle/paddle/fluid/memory/allocation/cuda_allocator.cc:79) - (at /paddle/paddle/fluid/imperative/basic_engine.cc:568) - - -merging config from ./configs/vit_base_patch16_224_pretrain_dec1.yaml ------ Imagenet2012 image train list len = 1281167 -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:45480', '127.0.0.1:58605', '127.0.0.1:23406', '127.0.0.1:16014', '127.0.0.1:60086', '127.0.0.1:60603', '127.0.0.1:46782'] -I1219 17:07:49.286090 25456 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:45480 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:58605', '127.0.0.1:23406', '127.0.0.1:16014', '127.0.0.1:60086', '127.0.0.1:60603', '127.0.0.1:46782'] -I1219 17:07:51.690086 25473 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:58605 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:23406', '127.0.0.1:16014', '127.0.0.1:60086', '127.0.0.1:60603', '127.0.0.1:46782'] -I1219 17:07:54.058967 25488 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:23406 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:16014', '127.0.0.1:60086', '127.0.0.1:60603', '127.0.0.1:46782'] -I1219 17:07:57.064612 25503 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:16014 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:60086', '127.0.0.1:60603', '127.0.0.1:46782'] -I1219 17:07:59.496040 25520 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:60086 successful. -server not ready, wait 3 sec to retry... -not ready endpoints:['127.0.0.1:60603', '127.0.0.1:46782'] -I1219 17:08:02.203279 25537 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:60603 successful. -I1219 17:08:04.597697 25554 gen_comm_id_helper.cc:190] Server listening on: 127.0.0.1:46782 successful. 
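The run above was killed by an out-of-memory error on GPU 1; the retry below runs with AMP: True and fewer loader workers. As a reference for what AMP changes, here is a toy-sized sketch of the auto_cast / GradScaler pattern that the removed training loop used (toy model and data, not the MAE model):

    import paddle

    model = paddle.nn.Linear(8, 8)
    criterion = paddle.nn.MSELoss()
    optimizer = paddle.optimizer.AdamW(parameters=model.parameters(), learning_rate=1e-4)
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

    x, target = paddle.randn([4, 8]), paddle.randn([4, 8])
    with paddle.amp.auto_cast():          # forward pass in float16 where safe
        loss = criterion(model(x), target)
    scaled = scaler.scale(loss)           # scale the loss to avoid float16 underflow
    scaled.backward()
    scaler.minimize(optimizer, scaled)    # unscale, then take the optimizer step
    optimizer.clear_grad()

If AMP alone is not enough, lowering DATA.BATCH_SIZE in the yaml config is the other lever the error message points at.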
-I1219 17:08:05.017540 25473 nccl_context.cc:74] init nccl context nranks: 8 local rank: 2 gpu id: 2 ring id: 0 -I1219 17:08:05.017537 25456 nccl_context.cc:74] init nccl context nranks: 8 local rank: 1 gpu id: 1 ring id: 0 -I1219 17:08:05.017560 25488 nccl_context.cc:74] init nccl context nranks: 8 local rank: 3 gpu id: 3 ring id: 0 -I1219 17:08:05.017565 25537 nccl_context.cc:74] init nccl context nranks: 8 local rank: 6 gpu id: 6 ring id: 0 -I1219 17:08:05.017578 25503 nccl_context.cc:74] init nccl context nranks: 8 local rank: 4 gpu id: 4 ring id: 0 -I1219 17:08:05.017585 25520 nccl_context.cc:74] init nccl context nranks: 8 local rank: 5 gpu id: 5 ring id: 0 -I1219 17:08:05.017601 25554 nccl_context.cc:74] init nccl context nranks: 8 local rank: 7 gpu id: 7 ring id: 0 -I1219 17:08:05.017613 25441 nccl_context.cc:74] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 0 -W1219 17:08:09.206136 25441 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:08:09.206564 25456 device_context.cc:447] Please NOTE: device: 1, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:08:09.206579 25554 device_context.cc:447] Please NOTE: device: 7, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:08:09.206670 25488 device_context.cc:447] Please NOTE: device: 3, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:08:09.206694 25520 device_context.cc:447] Please NOTE: device: 5, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:08:09.206728 25503 device_context.cc:447] Please NOTE: device: 4, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:08:09.209081 25537 device_context.cc:447] Please NOTE: device: 6, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:08:09.209785 25473 device_context.cc:447] Please NOTE: device: 2, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 -W1219 17:08:09.212059 25456 device_context.cc:465] device: 1, cuDNN Version: 7.6. -W1219 17:08:09.212066 25554 device_context.cc:465] device: 7, cuDNN Version: 7.6. -W1219 17:08:09.212080 25503 device_context.cc:465] device: 4, cuDNN Version: 7.6. -W1219 17:08:09.212086 25520 device_context.cc:465] device: 5, cuDNN Version: 7.6. -W1219 17:08:09.212086 25488 device_context.cc:465] device: 3, cuDNN Version: 7.6. -W1219 17:08:09.212239 25441 device_context.cc:465] device: 0, cuDNN Version: 7.6. -W1219 17:08:09.213409 25537 device_context.cc:465] device: 6, cuDNN Version: 7.6. -W1219 17:08:09.214195 25473 device_context.cc:465] device: 2, cuDNN Version: 7.6. 
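For the successful run below, the per-epoch learning rates printed during warmup (0.000005, 0.000008, 0.000012, 0.000016, ...) are consistent with a linear ramp from WARMUP_START_LR=1e-6 to BASE_LR=1.5e-4 over WARMUP_EPOCHS=40. A quick check, assuming that linear-ramp form (the scheduler actually used is WarmupCosineScheduler, which ramps linearly during warmup):

    base_lr, warmup_start_lr, warmup_epochs = 1.5e-4, 1e-6, 40
    for epoch in range(1, 5):
        lr = warmup_start_lr + (base_lr - warmup_start_lr) * epoch / warmup_epochs
        print(f"epoch {epoch}: LR={lr:.6f}")
    # prints 0.000005, 0.000008, 0.000012, 0.000016, matching the log lines below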
-INFO:local_logger:----- world_size = 8, local_rank = 4 -INFO:local_logger:----- world_size = 8, local_rank = 1 -INFO:local_logger:----- world_size = 8, local_rank = 2 -INFO:master_logger: -AMP: True -BASE: [''] -DATA: - BATCH_SIZE: 256 - BATCH_SIZE_EVAL: 8 - CROP_PCT: 0.875 - DATASET: imagenet2012 - DATA_PATH: /dataset/imagenet - IMAGE_SIZE: 224 - NUM_WORKERS: 2 -EVAL: False -LOCAL_RANK: 0 -MODEL: - ATTENTION_DROPOUT: 0.0 - DROPOUT: 0.0 - DROPPATH: 0.0 - MAE_PRETRAIN: True - NAME: vit_base_patch16_224_dec1 - NUM_CLASSES: 1000 - PRETRAINED: None - RESUME: None - TRANS: - DECODER: - DEPTH: 1 - EMBED_DIM: 512 - NUM_HEADS: 8 - ENCODER: - DEPTH: 12 - EMBED_DIM: 768 - NUM_HEADS: 12 - MASK_RATIO: 0.75 - MLP_RATIO: 4.0 - PATCH_SIZE: 16 - QKV_BIAS: True - TYPE: MAE -NGPUS: 8 -REPORT_FREQ: 100 -SAVE: ./output/train-20211219-17-07-40 -SAVE_FREQ: 1 -SEED: 0 -TAG: default -TRAIN: - ACCUM_ITER: 2 - BASE_LR: 0.00015 - CUTMIX_ALPHA: 1.0 - CUTMIX_MINMAX: None - END_LR: 0.0005 - GRAD_CLIP: 1 - LAST_EPOCH: 0 - LINEAR_SCALED_LR: None - LR_SCHEDULER: - DECAY_EPOCHS: 30 - DECAY_RATE: 0.1 - MILESTONES: 30, 60, 90 - NAME: warmupcosine - MIXUP_ALPHA: 0.8 - MIXUP_MODE: batch - MIXUP_PROB: 1.0 - MIXUP_SWITCH_PROB: 0.5 - NORMALIZE_TARGET: True - NUM_EPOCHS: 800 - OPTIMIZER: - BETAS: (0.9, 0.95) - EPS: 1e-08 - MOMENTUM: 0.9 - NAME: AdamW - RAND_AUGMENT: False - RAND_AUGMENT_LAYERS: 9 - RAND_AUGMENT_MAGNITUDE: 5 - SMOOTHING: 0.1 - WARMUP_EPOCHS: 40 - WARMUP_START_LR: 1e-06 - WEIGHT_DECAY: 0.05 -VALIDATE_FREQ: 100 -INFO:local_logger:----- world_size = 8, local_rank = 0 -INFO:master_logger:----- world_size = 8, local_rank = 0 -INFO:local_logger:----- world_size = 8, local_rank = 6 -INFO:local_logger:----- world_size = 8, local_rank = 5 -INFO:local_logger:----- world_size = 8, local_rank = 7 -INFO:local_logger:----- world_size = 8, local_rank = 3 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:master_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:master_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:master_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. LR=0.000005 -INFO:local_logger:----- Total # of train batch (single gpu): 626 -INFO:local_logger:Start training from epoch 1. -INFO:local_logger:Now training epoch 1. 
LR=0.000005 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1468 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1446 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1495 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1428 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1450 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1461 -INFO:master_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1454 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1459 -INFO:local_logger:Epoch[001/800], Step[0000/0626], Avg Loss: 1.1427 -INFO:local_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1136 -INFO:local_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1140 -INFO:local_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1137 -INFO:local_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1132 -INFO:local_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1132 -INFO:master_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1136 -INFO:local_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1135 -INFO:local_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1138 -INFO:local_logger:Epoch[001/800], Step[0100/0626], Avg Loss: 1.1139 -INFO:local_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0903 -INFO:local_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0904 -INFO:local_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0904 -INFO:local_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0908 -INFO:local_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0903 -INFO:local_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0900 -INFO:local_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0904 -INFO:local_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0902 -INFO:master_logger:Epoch[001/800], Step[0200/0626], Avg Loss: 1.0904 -INFO:local_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0723 -INFO:local_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0717 -INFO:local_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0718 -INFO:local_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0716 -INFO:local_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0719 -INFO:master_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0719 -INFO:local_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0718 -INFO:local_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0720 -INFO:local_logger:Epoch[001/800], Step[0300/0626], Avg Loss: 1.0720 -INFO:local_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0576 -INFO:local_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0572 -INFO:local_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0572 -INFO:local_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0570 -INFO:local_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0573 -INFO:local_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0570 -INFO:local_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0573 -INFO:master_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0572 -INFO:local_logger:Epoch[001/800], Step[0400/0626], Avg Loss: 1.0574 -INFO:local_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0461 -INFO:local_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0459 -INFO:local_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0459 -INFO:local_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0461 -INFO:local_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0457 -INFO:local_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0461 
-INFO:master_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0460 -INFO:local_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0463 -INFO:local_logger:Epoch[001/800], Step[0500/0626], Avg Loss: 1.0461 -INFO:local_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0374 -INFO:local_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0374 -INFO:local_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0375 -INFO:local_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0375 -INFO:master_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0375 -INFO:local_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0372 -INFO:local_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0377 -INFO:local_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0379 -INFO:local_logger:Epoch[001/800], Step[0600/0626], Avg Loss: 1.0374 -INFO:local_logger:----- Epoch[001/800], Train Loss: 1.0359, time: 934.80 -INFO:local_logger:Now training epoch 2. LR=0.000008 -INFO:local_logger:----- Epoch[001/800], Train Loss: 1.0356, time: 934.81 -INFO:local_logger:Now training epoch 2. LR=0.000008 -INFO:local_logger:----- Epoch[001/800], Train Loss: 1.0354, time: 934.86 -INFO:local_logger:Now training epoch 2. LR=0.000008 -INFO:local_logger:----- Epoch[001/800], Train Loss: 1.0361, time: 934.98 -INFO:local_logger:Now training epoch 2. LR=0.000008 -INFO:local_logger:----- Epoch[001/800], Train Loss: 1.0358, time: 935.03 -INFO:master_logger:----- Epoch[001/800], Train Loss: 1.0357, time: 935.03 -INFO:local_logger:----- Epoch[001/800], Train Loss: 1.0358, time: 935.07 -INFO:local_logger:Now training epoch 2. LR=0.000008 -INFO:local_logger:----- Epoch[001/800], Train Loss: 1.0356, time: 935.07 -INFO:local_logger:Now training epoch 2. LR=0.000008 -INFO:local_logger:----- Epoch[001/800], Train Loss: 1.0357, time: 935.09 -INFO:local_logger:Now training epoch 2. LR=0.000008 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-1-Loss-1.0357822933105671.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-1-Loss-1.0357822933105671.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-1-Loss-1.0357822933105671.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-1-Loss-1.0357822933105671.pdopt -INFO:local_logger:Now training epoch 2. LR=0.000008 -INFO:master_logger:Now training epoch 2. 
LR=0.000008 -INFO:local_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9953 -INFO:master_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9905 -INFO:local_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9836 -INFO:local_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9941 -INFO:local_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9887 -INFO:local_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9872 -INFO:local_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9919 -INFO:local_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9949 -INFO:local_logger:Epoch[002/800], Step[0000/0626], Avg Loss: 0.9885 -INFO:local_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9896 -INFO:local_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9894 -INFO:local_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9900 -INFO:local_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9895 -INFO:local_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9901 -INFO:local_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9887 -INFO:local_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9897 -INFO:master_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9896 -INFO:local_logger:Epoch[002/800], Step[0100/0626], Avg Loss: 0.9900 -INFO:local_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9880 -INFO:local_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9889 -INFO:local_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9887 -INFO:local_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9883 -INFO:local_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9887 -INFO:local_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9887 -INFO:local_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9883 -INFO:master_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9885 -INFO:local_logger:Epoch[002/800], Step[0200/0626], Avg Loss: 0.9883 -INFO:local_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9878 -INFO:local_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9874 -INFO:local_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9873 -INFO:local_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9875 -INFO:master_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9876 -INFO:local_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9877 -INFO:local_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9880 -INFO:local_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9878 -INFO:local_logger:Epoch[002/800], Step[0300/0626], Avg Loss: 0.9872 -INFO:local_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9872 -INFO:local_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9870 -INFO:local_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9867 -INFO:local_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9867 -INFO:local_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9870 -INFO:local_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9871 -INFO:local_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9870 -INFO:local_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9868 -INFO:master_logger:Epoch[002/800], Step[0400/0626], Avg Loss: 0.9869 -INFO:local_logger:Epoch[002/800], Step[0500/0626], Avg Loss: 0.9862 -INFO:local_logger:Epoch[002/800], Step[0500/0626], Avg Loss: 0.9865 -INFO:local_logger:Epoch[002/800], Step[0500/0626], Avg Loss: 0.9861 -INFO:local_logger:Epoch[002/800], Step[0500/0626], Avg Loss: 0.9864 -INFO:local_logger:Epoch[002/800], Step[0500/0626], Avg Loss: 0.9863 -INFO:local_logger:Epoch[002/800], Step[0500/0626], Avg Loss: 0.9861 
-[... nohup.out (removed in this patch): MAE multi-GPU pretraining log, epochs 002-014 of 800; per-step Avg Loss drops from ~0.986 to ~0.890, LR warms up from 0.000012 to 0.000053, each epoch takes roughly 850-893 s, and per-epoch model/optimizer checkpoints are saved under ./output/train-20211219-17-07-40/ ...]
-INFO:local_logger:Epoch[014/800], Step[0500/0626], Avg Loss: 0.8901 -INFO:local_logger:Epoch[014/800], Step[0500/0626], Avg Loss: 0.8902 -INFO:master_logger:Epoch[014/800], Step[0500/0626], Avg Loss: 0.8903 -INFO:local_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8896 -INFO:local_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8898 -INFO:local_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8896 -INFO:master_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8896 -INFO:local_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8897 -INFO:local_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8893 -INFO:local_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8897 -INFO:local_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8893 -INFO:local_logger:Epoch[014/800], Step[0600/0626], Avg Loss: 0.8894 -INFO:local_logger:----- Epoch[014/800], Train Loss: 0.8895, time: 845.39 -INFO:local_logger:Now training epoch 15. LR=0.000057 -INFO:local_logger:----- Epoch[014/800], Train Loss: 0.8895, time: 845.75 -INFO:local_logger:Now training epoch 15. LR=0.000057 -INFO:local_logger:----- Epoch[014/800], Train Loss: 0.8892, time: 846.69 -INFO:local_logger:----- Epoch[014/800], Train Loss: 0.8894, time: 846.08 -INFO:local_logger:Now training epoch 15. LR=0.000057 -INFO:local_logger:----- Epoch[014/800], Train Loss: 0.8891, time: 847.03 -INFO:local_logger:Now training epoch 15. LR=0.000057 -INFO:local_logger:Now training epoch 15. LR=0.000057 -INFO:local_logger:----- Epoch[014/800], Train Loss: 0.8890, time: 846.07 -INFO:local_logger:Now training epoch 15. LR=0.000057 -INFO:local_logger:----- Epoch[014/800], Train Loss: 0.8893, time: 842.90 -INFO:master_logger:----- Epoch[014/800], Train Loss: 0.8893, time: 842.90 -INFO:local_logger:----- Epoch[014/800], Train Loss: 0.8894, time: 846.07 -INFO:local_logger:Now training epoch 15. LR=0.000057 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-14-Loss-0.8892871914493445.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-14-Loss-0.8892871914493445.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-14-Loss-0.8892871914493445.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-14-Loss-0.8892871914493445.pdopt -INFO:local_logger:Now training epoch 15. LR=0.000057 -INFO:master_logger:Now training epoch 15. 
LR=0.000057 -INFO:local_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8662 -INFO:local_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8831 -INFO:local_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8842 -INFO:local_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8880 -INFO:local_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8864 -INFO:local_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8846 -INFO:master_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8812 -INFO:local_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8773 -INFO:local_logger:Epoch[015/800], Step[0000/0626], Avg Loss: 0.8795 -INFO:local_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8853 -INFO:local_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8863 -INFO:local_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8867 -INFO:local_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8861 -INFO:local_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8861 -INFO:local_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8862 -INFO:local_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8860 -INFO:master_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8860 -INFO:local_logger:Epoch[015/800], Step[0100/0626], Avg Loss: 0.8851 -INFO:local_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8851 -INFO:local_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8849 -INFO:local_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8845 -INFO:local_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8847 -INFO:master_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8846 -INFO:local_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8841 -INFO:local_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8841 -INFO:local_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8847 -INFO:local_logger:Epoch[015/800], Step[0200/0626], Avg Loss: 0.8845 -INFO:local_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8832 -INFO:local_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8831 -INFO:local_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8833 -INFO:local_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8827 -INFO:local_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8833 -INFO:local_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8833 -INFO:local_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8830 -INFO:master_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8831 -INFO:local_logger:Epoch[015/800], Step[0300/0626], Avg Loss: 0.8826 -INFO:local_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8827 -INFO:local_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8824 -INFO:local_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8825 -INFO:local_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8824 -INFO:local_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8827 -INFO:local_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8820 -INFO:local_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8828 -INFO:local_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8819 -INFO:master_logger:Epoch[015/800], Step[0400/0626], Avg Loss: 0.8824 -INFO:local_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8819 -INFO:local_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8813 -INFO:local_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8812 -INFO:local_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8818 -INFO:local_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8816 -INFO:local_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8820 
-INFO:local_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8815 -INFO:master_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8816 -INFO:local_logger:Epoch[015/800], Step[0500/0626], Avg Loss: 0.8818 -INFO:local_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8808 -INFO:local_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8805 -INFO:local_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8807 -INFO:local_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8807 -INFO:local_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8804 -INFO:master_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8806 -INFO:local_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8808 -INFO:local_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8804 -INFO:local_logger:Epoch[015/800], Step[0600/0626], Avg Loss: 0.8809 -INFO:local_logger:----- Epoch[015/800], Train Loss: 0.8805, time: 897.37 -INFO:local_logger:Now training epoch 16. LR=0.000061 -INFO:local_logger:----- Epoch[015/800], Train Loss: 0.8805, time: 893.90 -INFO:master_logger:----- Epoch[015/800], Train Loss: 0.8804, time: 893.90 -INFO:local_logger:----- Epoch[015/800], Train Loss: 0.8805, time: 898.09 -INFO:local_logger:----- Epoch[015/800], Train Loss: 0.8803, time: 898.09 -INFO:local_logger:Now training epoch 16. LR=0.000061 -INFO:local_logger:Now training epoch 16. LR=0.000061 -INFO:local_logger:----- Epoch[015/800], Train Loss: 0.8802, time: 898.08 -INFO:local_logger:Now training epoch 16. LR=0.000061 -INFO:local_logger:----- Epoch[015/800], Train Loss: 0.8802, time: 898.09 -INFO:local_logger:Now training epoch 16. LR=0.000061 -INFO:local_logger:----- Epoch[015/800], Train Loss: 0.8807, time: 898.77 -INFO:local_logger:Now training epoch 16. LR=0.000061 -INFO:local_logger:----- Epoch[015/800], Train Loss: 0.8806, time: 898.79 -INFO:local_logger:Now training epoch 16. LR=0.000061 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-15-Loss-0.8804958925234925.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-15-Loss-0.8804958925234925.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-15-Loss-0.8804958925234925.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-15-Loss-0.8804958925234925.pdopt -INFO:local_logger:Now training epoch 16. LR=0.000061 -INFO:master_logger:Now training epoch 16. 
LR=0.000061 -INFO:local_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8772 -INFO:local_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8776 -INFO:local_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8818 -INFO:local_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8756 -INFO:master_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8774 -INFO:local_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8834 -INFO:local_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8802 -INFO:local_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8772 -INFO:local_logger:Epoch[016/800], Step[0000/0626], Avg Loss: 0.8659 -INFO:local_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8729 -INFO:local_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8735 -INFO:local_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8742 -INFO:local_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8743 -INFO:local_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8738 -INFO:master_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8738 -INFO:local_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8741 -INFO:local_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8747 -INFO:local_logger:Epoch[016/800], Step[0100/0626], Avg Loss: 0.8731 -INFO:local_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8724 -INFO:local_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8723 -INFO:local_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8727 -INFO:local_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8726 -INFO:local_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8731 -INFO:local_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8732 -INFO:local_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8728 -INFO:master_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8728 -INFO:local_logger:Epoch[016/800], Step[0200/0626], Avg Loss: 0.8732 -INFO:local_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8718 -INFO:local_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8723 -INFO:local_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8717 -INFO:local_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8718 -INFO:local_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8718 -INFO:local_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8720 -INFO:master_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8719 -INFO:local_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8722 -INFO:local_logger:Epoch[016/800], Step[0300/0626], Avg Loss: 0.8717 -INFO:local_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8710 -INFO:local_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8710 -INFO:local_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8710 -INFO:local_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8713 -INFO:local_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8712 -INFO:local_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8717 -INFO:master_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8712 -INFO:local_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8713 -INFO:local_logger:Epoch[016/800], Step[0400/0626], Avg Loss: 0.8713 -INFO:local_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8707 -INFO:local_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8706 -INFO:local_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8710 -INFO:local_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8707 -INFO:local_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8709 -INFO:local_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8706 
-INFO:master_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8707 -INFO:local_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8705 -INFO:local_logger:Epoch[016/800], Step[0500/0626], Avg Loss: 0.8709 -INFO:local_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8696 -INFO:local_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8697 -INFO:local_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8697 -INFO:local_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8697 -INFO:local_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8696 -INFO:local_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8700 -INFO:local_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8701 -INFO:master_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8698 -INFO:local_logger:Epoch[016/800], Step[0600/0626], Avg Loss: 0.8700 -INFO:local_logger:----- Epoch[016/800], Train Loss: 0.8695, time: 861.71 -INFO:local_logger:Now training epoch 17. LR=0.000064 -INFO:local_logger:----- Epoch[016/800], Train Loss: 0.8693, time: 862.54 -INFO:local_logger:Now training epoch 17. LR=0.000064 -INFO:local_logger:----- Epoch[016/800], Train Loss: 0.8699, time: 862.86 -INFO:local_logger:Now training epoch 17. LR=0.000064 -INFO:local_logger:----- Epoch[016/800], Train Loss: 0.8695, time: 863.58 -INFO:local_logger:Now training epoch 17. LR=0.000064 -INFO:local_logger:----- Epoch[016/800], Train Loss: 0.8695, time: 862.86 -INFO:local_logger:----- Epoch[016/800], Train Loss: 0.8698, time: 862.85 -INFO:local_logger:Now training epoch 17. LR=0.000064 -INFO:local_logger:Now training epoch 17. LR=0.000064 -INFO:local_logger:----- Epoch[016/800], Train Loss: 0.8698, time: 862.87 -INFO:local_logger:Now training epoch 17. LR=0.000064 -INFO:local_logger:----- Epoch[016/800], Train Loss: 0.8694, time: 859.59 -INFO:master_logger:----- Epoch[016/800], Train Loss: 0.8696, time: 859.59 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-16-Loss-0.8694310493630203.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-16-Loss-0.8694310493630203.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-16-Loss-0.8694310493630203.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-16-Loss-0.8694310493630203.pdopt -INFO:local_logger:Now training epoch 17. LR=0.000064 -INFO:master_logger:Now training epoch 17. 
LR=0.000064 -INFO:local_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8663 -INFO:local_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8709 -INFO:local_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8677 -INFO:local_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8526 -INFO:local_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8679 -INFO:local_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8632 -INFO:master_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8658 -INFO:local_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8670 -INFO:local_logger:Epoch[017/800], Step[0000/0626], Avg Loss: 0.8705 -INFO:local_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8666 -INFO:local_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8669 -INFO:local_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8674 -INFO:local_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8667 -INFO:master_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8668 -INFO:local_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8662 -INFO:local_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8672 -INFO:local_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8665 -INFO:local_logger:Epoch[017/800], Step[0100/0626], Avg Loss: 0.8673 -INFO:local_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8654 -INFO:local_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8660 -INFO:local_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8658 -INFO:local_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8659 -INFO:local_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8654 -INFO:local_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8658 -INFO:master_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8657 -INFO:local_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8656 -INFO:local_logger:Epoch[017/800], Step[0200/0626], Avg Loss: 0.8654 -INFO:local_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8647 -INFO:local_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8648 -INFO:local_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8646 -INFO:local_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8649 -INFO:master_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8647 -INFO:local_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8652 -INFO:local_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8646 -INFO:local_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8642 -INFO:local_logger:Epoch[017/800], Step[0300/0626], Avg Loss: 0.8648 -INFO:local_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8629 -INFO:local_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8639 -INFO:local_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8636 -INFO:local_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8635 -INFO:local_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8634 -INFO:local_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8634 -INFO:local_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8635 -INFO:local_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8633 -INFO:master_logger:Epoch[017/800], Step[0400/0626], Avg Loss: 0.8634 -INFO:local_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8619 -INFO:local_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8628 -INFO:local_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8626 -INFO:local_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8624 -INFO:local_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8622 -INFO:local_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8624 
-INFO:master_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8624 -INFO:local_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8625 -INFO:local_logger:Epoch[017/800], Step[0500/0626], Avg Loss: 0.8625 -INFO:local_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8615 -INFO:local_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8619 -INFO:local_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8620 -INFO:local_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8613 -INFO:local_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8618 -INFO:local_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8618 -INFO:local_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8616 -INFO:master_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8617 -INFO:local_logger:Epoch[017/800], Step[0600/0626], Avg Loss: 0.8619 -INFO:local_logger:----- Epoch[017/800], Train Loss: 0.8617, time: 890.30 -INFO:local_logger:Now training epoch 18. LR=0.000068 -INFO:local_logger:----- Epoch[017/800], Train Loss: 0.8616, time: 890.30 -INFO:local_logger:Now training epoch 18. LR=0.000068 -INFO:local_logger:----- Epoch[017/800], Train Loss: 0.8617, time: 890.31 -INFO:local_logger:Now training epoch 18. LR=0.000068 -INFO:local_logger:----- Epoch[017/800], Train Loss: 0.8610, time: 890.96 -INFO:local_logger:Now training epoch 18. LR=0.000068 -INFO:local_logger:----- Epoch[017/800], Train Loss: 0.8614, time: 887.14 -INFO:master_logger:----- Epoch[017/800], Train Loss: 0.8615, time: 887.14 -INFO:local_logger:----- Epoch[017/800], Train Loss: 0.8616, time: 891.29 -INFO:local_logger:Now training epoch 18. LR=0.000068 -INFO:local_logger:----- Epoch[017/800], Train Loss: 0.8617, time: 890.99 -INFO:local_logger:Now training epoch 18. LR=0.000068 -INFO:local_logger:----- Epoch[017/800], Train Loss: 0.8614, time: 892.15 -INFO:local_logger:Now training epoch 18. LR=0.000068 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-17-Loss-0.8613511298173326.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-17-Loss-0.8613511298173326.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-17-Loss-0.8613511298173326.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-17-Loss-0.8613511298173326.pdopt -INFO:local_logger:Now training epoch 18. LR=0.000068 -INFO:master_logger:Now training epoch 18. 
LR=0.000068 -INFO:local_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8610 -INFO:master_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8573 -INFO:local_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8499 -INFO:local_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8529 -INFO:local_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8567 -INFO:local_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8551 -INFO:local_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8589 -INFO:local_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8601 -INFO:local_logger:Epoch[018/800], Step[0000/0626], Avg Loss: 0.8641 -INFO:local_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8555 -INFO:local_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8553 -INFO:local_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8547 -INFO:local_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8552 -INFO:local_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8543 -INFO:local_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8543 -INFO:master_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8547 -INFO:local_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8542 -INFO:local_logger:Epoch[018/800], Step[0100/0626], Avg Loss: 0.8543 -INFO:local_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8544 -INFO:local_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8543 -INFO:local_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8543 -INFO:local_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8541 -INFO:local_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8537 -INFO:master_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8541 -INFO:local_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8544 -INFO:local_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8539 -INFO:local_logger:Epoch[018/800], Step[0200/0626], Avg Loss: 0.8541 -INFO:local_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8534 -INFO:local_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8534 -INFO:local_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8536 -INFO:local_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8535 -INFO:local_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8540 -INFO:local_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8535 -INFO:master_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8537 -INFO:local_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8544 -INFO:local_logger:Epoch[018/800], Step[0300/0626], Avg Loss: 0.8538 -INFO:local_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8536 -INFO:local_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8534 -INFO:local_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8534 -INFO:local_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8532 -INFO:local_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8542 -INFO:local_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8532 -INFO:local_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8537 -INFO:master_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8535 -INFO:local_logger:Epoch[018/800], Step[0400/0626], Avg Loss: 0.8530 -INFO:local_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8533 -INFO:local_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8536 -INFO:local_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8529 -INFO:local_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8531 -INFO:local_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8534 -INFO:local_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8529 
-INFO:local_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8534 -INFO:master_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8533 -INFO:local_logger:Epoch[018/800], Step[0500/0626], Avg Loss: 0.8541 -INFO:local_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8533 -INFO:local_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8525 -INFO:local_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8531 -INFO:local_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8529 -INFO:local_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8528 -INFO:local_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8525 -INFO:local_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8536 -INFO:local_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8528 -INFO:master_logger:Epoch[018/800], Step[0600/0626], Avg Loss: 0.8529 -INFO:local_logger:----- Epoch[018/800], Train Loss: 0.8532, time: 859.28 -INFO:local_logger:Now training epoch 19. LR=0.000072 -INFO:local_logger:----- Epoch[018/800], Train Loss: 0.8524, time: 859.95 -INFO:local_logger:Now training epoch 19. LR=0.000072 -INFO:local_logger:----- Epoch[018/800], Train Loss: 0.8527, time: 855.56 -INFO:local_logger:----- Epoch[018/800], Train Loss: 0.8523, time: 859.27 -INFO:local_logger:----- Epoch[018/800], Train Loss: 0.8527, time: 859.94 -INFO:master_logger:----- Epoch[018/800], Train Loss: 0.8528, time: 855.56 -INFO:local_logger:Now training epoch 19. LR=0.000072 -INFO:local_logger:Now training epoch 19. LR=0.000072 -INFO:local_logger:----- Epoch[018/800], Train Loss: 0.8530, time: 859.29 -INFO:local_logger:Now training epoch 19. LR=0.000072 -INFO:local_logger:----- Epoch[018/800], Train Loss: 0.8528, time: 859.96 -INFO:local_logger:Now training epoch 19. LR=0.000072 -INFO:local_logger:----- Epoch[018/800], Train Loss: 0.8534, time: 859.27 -INFO:local_logger:Now training epoch 19. LR=0.000072 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-18-Loss-0.8526818839083388.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-18-Loss-0.8526818839083388.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-18-Loss-0.8526818839083388.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-18-Loss-0.8526818839083388.pdopt -INFO:local_logger:Now training epoch 19. LR=0.000072 -INFO:master_logger:Now training epoch 19. 
LR=0.000072 -INFO:local_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8466 -INFO:local_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8442 -INFO:local_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8470 -INFO:local_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8424 -INFO:local_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8531 -INFO:local_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8522 -INFO:master_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8474 -INFO:local_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8452 -INFO:local_logger:Epoch[019/800], Step[0000/0626], Avg Loss: 0.8487 -INFO:local_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8481 -INFO:local_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8485 -INFO:local_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8477 -INFO:local_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8475 -INFO:local_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8477 -INFO:local_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8473 -INFO:local_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8491 -INFO:master_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8480 -INFO:local_logger:Epoch[019/800], Step[0100/0626], Avg Loss: 0.8481 -INFO:local_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8476 -INFO:local_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8477 -INFO:local_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8476 -INFO:local_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8481 -INFO:local_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8480 -INFO:master_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8478 -INFO:local_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8483 -INFO:local_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8476 -INFO:local_logger:Epoch[019/800], Step[0200/0626], Avg Loss: 0.8478 -INFO:local_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8470 -INFO:local_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8465 -INFO:master_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8468 -INFO:local_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8469 -INFO:local_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8470 -INFO:local_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8470 -INFO:local_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8465 -INFO:local_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8468 -INFO:local_logger:Epoch[019/800], Step[0300/0626], Avg Loss: 0.8467 -INFO:local_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8458 -INFO:local_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8460 -INFO:local_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8460 -INFO:local_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8464 -INFO:local_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8461 -INFO:local_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8460 -INFO:local_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8465 -INFO:local_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8463 -INFO:master_logger:Epoch[019/800], Step[0400/0626], Avg Loss: 0.8461 -INFO:local_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8452 -INFO:local_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8454 -INFO:local_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8450 -INFO:local_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8455 -INFO:local_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8451 -INFO:local_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8452 
-INFO:local_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8452 -INFO:local_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8455 -INFO:master_logger:Epoch[019/800], Step[0500/0626], Avg Loss: 0.8452 -INFO:local_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8445 -INFO:local_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8442 -INFO:local_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8446 -INFO:local_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8445 -INFO:local_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8447 -INFO:local_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8445 -INFO:master_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8445 -INFO:local_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8443 -INFO:local_logger:Epoch[019/800], Step[0600/0626], Avg Loss: 0.8445 -INFO:local_logger:----- Epoch[019/800], Train Loss: 0.8446, time: 880.98 -INFO:master_logger:----- Epoch[019/800], Train Loss: 0.8443, time: 880.98 -INFO:local_logger:----- Epoch[019/800], Train Loss: 0.8443, time: 885.40 -INFO:local_logger:Now training epoch 20. LR=0.000075 -INFO:local_logger:----- Epoch[019/800], Train Loss: 0.8443, time: 885.43 -INFO:local_logger:Now training epoch 20. LR=0.000075 -INFO:local_logger:----- Epoch[019/800], Train Loss: 0.8444, time: 885.46 -INFO:local_logger:Now training epoch 20. LR=0.000075 -INFO:local_logger:----- Epoch[019/800], Train Loss: 0.8441, time: 885.49 -INFO:local_logger:Now training epoch 20. LR=0.000075 -INFO:local_logger:----- Epoch[019/800], Train Loss: 0.8441, time: 885.53 -INFO:local_logger:Now training epoch 20. LR=0.000075 -INFO:local_logger:----- Epoch[019/800], Train Loss: 0.8443, time: 885.54 -INFO:local_logger:Now training epoch 20. LR=0.000075 -INFO:local_logger:----- Epoch[019/800], Train Loss: 0.8446, time: 885.53 -INFO:local_logger:Now training epoch 20. LR=0.000075 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-19-Loss-0.8445631699389794.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-19-Loss-0.8445631699389794.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-19-Loss-0.8445631699389794.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-19-Loss-0.8445631699389794.pdopt -INFO:local_logger:Now training epoch 20. LR=0.000075 -INFO:master_logger:Now training epoch 20. 
LR=0.000075 -INFO:local_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8395 -INFO:local_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8579 -INFO:local_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8337 -INFO:master_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8394 -INFO:local_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8377 -INFO:local_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8425 -INFO:local_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8297 -INFO:local_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8371 -INFO:local_logger:Epoch[020/800], Step[0000/0626], Avg Loss: 0.8374 -INFO:local_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8389 -INFO:local_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8399 -INFO:local_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8385 -INFO:local_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8402 -INFO:local_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8398 -INFO:local_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8399 -INFO:master_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8396 -INFO:local_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8398 -INFO:local_logger:Epoch[020/800], Step[0100/0626], Avg Loss: 0.8400 -INFO:local_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8402 -INFO:local_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8410 -INFO:local_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8400 -INFO:local_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8406 -INFO:local_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8409 -INFO:master_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8408 -INFO:local_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8403 -INFO:local_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8416 -INFO:local_logger:Epoch[020/800], Step[0200/0626], Avg Loss: 0.8415 -INFO:local_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8399 -INFO:local_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8411 -INFO:local_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8404 -INFO:local_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8406 -INFO:local_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8406 -INFO:master_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8406 -INFO:local_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8403 -INFO:local_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8415 -INFO:local_logger:Epoch[020/800], Step[0300/0626], Avg Loss: 0.8403 -INFO:local_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8397 -INFO:local_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8397 -INFO:local_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8393 -INFO:local_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8400 -INFO:local_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8395 -INFO:local_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8400 -INFO:local_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8397 -INFO:master_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8398 -INFO:local_logger:Epoch[020/800], Step[0400/0626], Avg Loss: 0.8404 -INFO:local_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8384 -INFO:local_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8386 -INFO:local_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8384 -INFO:local_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8388 -INFO:local_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8383 -INFO:local_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8387 
-INFO:local_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8384 -INFO:master_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8386 -INFO:local_logger:Epoch[020/800], Step[0500/0626], Avg Loss: 0.8391 -INFO:local_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8387 -INFO:local_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8378 -INFO:local_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8380 -INFO:local_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8382 -INFO:local_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8383 -INFO:local_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8381 -INFO:master_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8382 -INFO:local_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8380 -INFO:local_logger:Epoch[020/800], Step[0600/0626], Avg Loss: 0.8383 -INFO:local_logger:----- Epoch[020/800], Train Loss: 0.8379, time: 856.21 -INFO:local_logger:Now training epoch 21. LR=0.000079 -INFO:local_logger:----- Epoch[020/800], Train Loss: 0.8378, time: 856.64 -INFO:local_logger:----- Epoch[020/800], Train Loss: 0.8381, time: 856.54 -INFO:local_logger:Now training epoch 21. LR=0.000079 -INFO:local_logger:Now training epoch 21. LR=0.000079 -INFO:local_logger:----- Epoch[020/800], Train Loss: 0.8378, time: 856.55 -INFO:local_logger:Now training epoch 21. LR=0.000079 -INFO:local_logger:----- Epoch[020/800], Train Loss: 0.8378, time: 856.54 -INFO:local_logger:Now training epoch 21. LR=0.000079 -INFO:local_logger:----- Epoch[020/800], Train Loss: 0.8385, time: 856.62 -INFO:local_logger:----- Epoch[020/800], Train Loss: 0.8379, time: 856.58 -INFO:local_logger:Now training epoch 21. LR=0.000079 -INFO:local_logger:Now training epoch 21. LR=0.000079 -INFO:local_logger:----- Epoch[020/800], Train Loss: 0.8377, time: 853.74 -INFO:master_logger:----- Epoch[020/800], Train Loss: 0.8379, time: 853.74 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-20-Loss-0.837697342612629.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-20-Loss-0.837697342612629.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-20-Loss-0.837697342612629.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-20-Loss-0.837697342612629.pdopt -INFO:local_logger:Now training epoch 21. LR=0.000079 -INFO:master_logger:Now training epoch 21. 
LR=0.000079 -INFO:local_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8247 -INFO:local_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8468 -INFO:master_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8311 -INFO:local_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8307 -INFO:local_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8301 -INFO:local_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8220 -INFO:local_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8352 -INFO:local_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8284 -INFO:local_logger:Epoch[021/800], Step[0000/0626], Avg Loss: 0.8313 -INFO:local_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8345 -INFO:local_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8327 -INFO:local_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8336 -INFO:local_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8344 -INFO:local_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8331 -INFO:local_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8343 -INFO:master_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8338 -INFO:local_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8339 -INFO:local_logger:Epoch[021/800], Step[0100/0626], Avg Loss: 0.8339 -INFO:local_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8343 -INFO:local_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8340 -INFO:local_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8344 -INFO:local_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8339 -INFO:master_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8338 -INFO:local_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8333 -INFO:local_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8332 -INFO:local_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8337 -INFO:local_logger:Epoch[021/800], Step[0200/0626], Avg Loss: 0.8335 -INFO:local_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8328 -INFO:local_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8336 -INFO:local_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8330 -INFO:local_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8331 -INFO:local_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8331 -INFO:local_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8337 -INFO:local_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8328 -INFO:local_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8336 -INFO:master_logger:Epoch[021/800], Step[0300/0626], Avg Loss: 0.8332 -INFO:local_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8324 -INFO:local_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8322 -INFO:local_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8329 -INFO:local_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8326 -INFO:local_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8333 -INFO:local_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8324 -INFO:local_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8332 -INFO:master_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8327 -INFO:local_logger:Epoch[021/800], Step[0400/0626], Avg Loss: 0.8328 -INFO:local_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8323 -INFO:local_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8322 -INFO:master_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8321 -INFO:local_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8325 -INFO:local_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8319 -INFO:local_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8317 
-INFO:local_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8323 -INFO:local_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8319 -INFO:local_logger:Epoch[021/800], Step[0500/0626], Avg Loss: 0.8320 -INFO:local_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8319 -INFO:local_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8316 -INFO:local_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8318 -INFO:local_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8314 -INFO:local_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8317 -INFO:local_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8314 -INFO:local_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8315 -INFO:master_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8316 -INFO:local_logger:Epoch[021/800], Step[0600/0626], Avg Loss: 0.8317 -INFO:local_logger:----- Epoch[021/800], Train Loss: 0.8314, time: 903.73 -INFO:master_logger:----- Epoch[021/800], Train Loss: 0.8313, time: 903.73 -INFO:local_logger:----- Epoch[021/800], Train Loss: 0.8311, time: 908.09 -INFO:local_logger:Now training epoch 22. LR=0.000083 -INFO:local_logger:----- Epoch[021/800], Train Loss: 0.8312, time: 908.50 -INFO:local_logger:Now training epoch 22. LR=0.000083 -INFO:local_logger:----- Epoch[021/800], Train Loss: 0.8312, time: 908.98 -INFO:local_logger:Now training epoch 22. LR=0.000083 -INFO:local_logger:----- Epoch[021/800], Train Loss: 0.8314, time: 908.52 -INFO:local_logger:Now training epoch 22. LR=0.000083 -INFO:local_logger:----- Epoch[021/800], Train Loss: 0.8314, time: 908.52 -INFO:local_logger:Now training epoch 22. LR=0.000083 -INFO:local_logger:----- Epoch[021/800], Train Loss: 0.8311, time: 908.52 -INFO:local_logger:Now training epoch 22. LR=0.000083 -INFO:local_logger:----- Epoch[021/800], Train Loss: 0.8317, time: 908.52 -INFO:local_logger:Now training epoch 22. LR=0.000083 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-21-Loss-0.8314437446567381.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-21-Loss-0.8314437446567381.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-21-Loss-0.8314437446567381.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-21-Loss-0.8314437446567381.pdopt -INFO:local_logger:Now training epoch 22. LR=0.000083 -INFO:master_logger:Now training epoch 22. 
LR=0.000083 -INFO:local_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8238 -INFO:local_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8287 -INFO:master_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8236 -INFO:local_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8314 -INFO:local_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8120 -INFO:local_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8206 -INFO:local_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8245 -INFO:local_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8214 -INFO:local_logger:Epoch[022/800], Step[0000/0626], Avg Loss: 0.8266 -INFO:local_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8256 -INFO:local_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8255 -INFO:local_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8256 -INFO:local_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8256 -INFO:local_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8274 -INFO:local_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8262 -INFO:local_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8270 -INFO:master_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8262 -INFO:local_logger:Epoch[022/800], Step[0100/0626], Avg Loss: 0.8269 -INFO:local_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8246 -INFO:local_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8258 -INFO:local_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8262 -INFO:local_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8250 -INFO:master_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8252 -INFO:local_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8250 -INFO:local_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8256 -INFO:local_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8245 -INFO:local_logger:Epoch[022/800], Step[0200/0626], Avg Loss: 0.8253 -INFO:local_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8236 -INFO:local_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8237 -INFO:local_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8235 -INFO:local_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8239 -INFO:local_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8245 -INFO:local_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8242 -INFO:local_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8232 -INFO:local_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8245 -INFO:master_logger:Epoch[022/800], Step[0300/0626], Avg Loss: 0.8239 -INFO:local_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8234 -INFO:local_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8226 -INFO:local_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8230 -INFO:local_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8239 -INFO:local_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8240 -INFO:local_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8231 -INFO:local_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8231 -INFO:local_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8235 -INFO:master_logger:Epoch[022/800], Step[0400/0626], Avg Loss: 0.8233 -INFO:local_logger:Epoch[022/800], Step[0500/0626], Avg Loss: 0.8231 -INFO:local_logger:Epoch[022/800], Step[0500/0626], Avg Loss: 0.8231 -INFO:local_logger:Epoch[022/800], Step[0500/0626], Avg Loss: 0.8222 -INFO:local_logger:Epoch[022/800], Step[0500/0626], Avg Loss: 0.8226 -INFO:master_logger:Epoch[022/800], Step[0500/0626], Avg Loss: 0.8225 -INFO:local_logger:Epoch[022/800], Step[0500/0626], Avg Loss: 0.8222 
[Deleted nohup.out training-log lines omitted: MAE pretraining epochs 22–34, per-step Avg Loss reports from the eight local loggers and the master logger, epoch-average train loss falling from 0.8221 to 0.7561, LR warming up from 0.000087 to 0.000128, with model/optimizer checkpoints saved each epoch under ./output/train-20211219-17-07-40/.]
-INFO:local_logger:Epoch[034/800], Step[0500/0626], Avg Loss: 0.7522 -INFO:local_logger:Epoch[034/800], Step[0500/0626], Avg Loss: 0.7532 -INFO:master_logger:Epoch[034/800], Step[0500/0626], Avg Loss: 0.7524 -INFO:local_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7520 -INFO:local_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7521 -INFO:local_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7516 -INFO:local_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7522 -INFO:local_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7516 -INFO:master_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7520 -INFO:local_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7521 -INFO:local_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7526 -INFO:local_logger:Epoch[034/800], Step[0600/0626], Avg Loss: 0.7516 -INFO:local_logger:----- Epoch[034/800], Train Loss: 0.7519, time: 885.18 -INFO:local_logger:Now training epoch 35. LR=0.000131 -INFO:local_logger:----- Epoch[034/800], Train Loss: 0.7520, time: 885.17 -INFO:local_logger:Now training epoch 35. LR=0.000131 -INFO:local_logger:----- Epoch[034/800], Train Loss: 0.7519, time: 885.32 -INFO:local_logger:Now training epoch 35. LR=0.000131 -INFO:local_logger:----- Epoch[034/800], Train Loss: 0.7516, time: 882.34 -INFO:master_logger:----- Epoch[034/800], Train Loss: 0.7519, time: 882.34 -INFO:local_logger:----- Epoch[034/800], Train Loss: 0.7522, time: 885.50 -INFO:local_logger:Now training epoch 35. LR=0.000131 -INFO:local_logger:----- Epoch[034/800], Train Loss: 0.7516, time: 885.48 -INFO:local_logger:Now training epoch 35. LR=0.000131 -INFO:local_logger:----- Epoch[034/800], Train Loss: 0.7516, time: 885.48 -INFO:local_logger:Now training epoch 35. LR=0.000131 -INFO:local_logger:----- Epoch[034/800], Train Loss: 0.7524, time: 885.48 -INFO:local_logger:Now training epoch 35. LR=0.000131 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-34-Loss-0.7516406552539668.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-34-Loss-0.7516406552539668.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-34-Loss-0.7516406552539668.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-34-Loss-0.7516406552539668.pdopt -INFO:local_logger:Now training epoch 35. LR=0.000131 -INFO:master_logger:Now training epoch 35. 
LR=0.000131 -INFO:local_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7487 -INFO:local_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7414 -INFO:local_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7520 -INFO:master_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7511 -INFO:local_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7453 -INFO:local_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7505 -INFO:local_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7633 -INFO:local_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7544 -INFO:local_logger:Epoch[035/800], Step[0000/0626], Avg Loss: 0.7535 -INFO:local_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7500 -INFO:local_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7504 -INFO:local_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7496 -INFO:local_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7499 -INFO:local_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7493 -INFO:local_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7492 -INFO:local_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7490 -INFO:master_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7494 -INFO:local_logger:Epoch[035/800], Step[0100/0626], Avg Loss: 0.7481 -INFO:local_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7496 -INFO:local_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7496 -INFO:local_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7495 -INFO:local_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7485 -INFO:local_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7487 -INFO:master_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7493 -INFO:local_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7490 -INFO:local_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7498 -INFO:local_logger:Epoch[035/800], Step[0200/0626], Avg Loss: 0.7493 -INFO:local_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7484 -INFO:local_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7486 -INFO:local_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7491 -INFO:local_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7494 -INFO:local_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7494 -INFO:master_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7490 -INFO:local_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7491 -INFO:local_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7488 -INFO:local_logger:Epoch[035/800], Step[0300/0626], Avg Loss: 0.7495 -INFO:local_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7488 -INFO:local_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7490 -INFO:local_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7483 -INFO:local_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7490 -INFO:local_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7484 -INFO:local_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7489 -INFO:local_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7480 -INFO:master_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7486 -INFO:local_logger:Epoch[035/800], Step[0400/0626], Avg Loss: 0.7484 -INFO:local_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7485 -INFO:local_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7488 -INFO:local_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7483 -INFO:local_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7482 -INFO:local_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7479 -INFO:local_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7482 
-INFO:local_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7491 -INFO:master_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7484 -INFO:local_logger:Epoch[035/800], Step[0500/0626], Avg Loss: 0.7484 -INFO:local_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7482 -INFO:local_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7485 -INFO:local_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7478 -INFO:local_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7480 -INFO:local_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7480 -INFO:local_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7481 -INFO:local_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7478 -INFO:local_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7485 -INFO:master_logger:Epoch[035/800], Step[0600/0626], Avg Loss: 0.7481 -INFO:local_logger:----- Epoch[035/800], Train Loss: 0.7484, time: 858.24 -INFO:local_logger:Now training epoch 36. LR=0.000135 -INFO:local_logger:----- Epoch[035/800], Train Loss: 0.7479, time: 859.58 -INFO:local_logger:Now training epoch 36. LR=0.000135 -INFO:local_logger:----- Epoch[035/800], Train Loss: 0.7481, time: 859.55 -INFO:local_logger:Now training epoch 36. LR=0.000135 -INFO:local_logger:----- Epoch[035/800], Train Loss: 0.7480, time: 859.83 -INFO:local_logger:Now training epoch 36. LR=0.000135 -INFO:local_logger:----- Epoch[035/800], Train Loss: 0.7480, time: 859.45 -INFO:local_logger:Now training epoch 36. LR=0.000135 -INFO:local_logger:----- Epoch[035/800], Train Loss: 0.7484, time: 859.45 -INFO:local_logger:Now training epoch 36. LR=0.000135 -INFO:local_logger:----- Epoch[035/800], Train Loss: 0.7477, time: 855.75 -INFO:master_logger:----- Epoch[035/800], Train Loss: 0.7480, time: 855.75 -INFO:local_logger:----- Epoch[035/800], Train Loss: 0.7478, time: 859.47 -INFO:local_logger:Now training epoch 36. LR=0.000135 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-35-Loss-0.7477136553201034.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-35-Loss-0.7477136553201034.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-35-Loss-0.7477136553201034.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-35-Loss-0.7477136553201034.pdopt -INFO:local_logger:Now training epoch 36. LR=0.000135 -INFO:master_logger:Now training epoch 36. 
LR=0.000135 -INFO:local_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7546 -INFO:local_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7470 -INFO:local_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7469 -INFO:master_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7497 -INFO:local_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7536 -INFO:local_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7544 -INFO:local_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7537 -INFO:local_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7427 -INFO:local_logger:Epoch[036/800], Step[0000/0626], Avg Loss: 0.7446 -INFO:local_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7474 -INFO:local_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7461 -INFO:local_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7458 -INFO:local_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7467 -INFO:local_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7456 -INFO:local_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7469 -INFO:local_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7457 -INFO:master_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7463 -INFO:local_logger:Epoch[036/800], Step[0100/0626], Avg Loss: 0.7461 -INFO:local_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7459 -INFO:local_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7473 -INFO:local_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7459 -INFO:local_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7461 -INFO:local_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7460 -INFO:local_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7450 -INFO:master_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7460 -INFO:local_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7464 -INFO:local_logger:Epoch[036/800], Step[0200/0626], Avg Loss: 0.7453 -INFO:local_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7464 -INFO:local_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7457 -INFO:local_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7456 -INFO:local_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7461 -INFO:local_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7456 -INFO:master_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7456 -INFO:local_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7451 -INFO:local_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7456 -INFO:local_logger:Epoch[036/800], Step[0300/0626], Avg Loss: 0.7450 -INFO:local_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7453 -INFO:local_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7449 -INFO:local_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7457 -INFO:local_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7456 -INFO:local_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7456 -INFO:local_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7450 -INFO:master_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7453 -INFO:local_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7452 -INFO:local_logger:Epoch[036/800], Step[0400/0626], Avg Loss: 0.7449 -INFO:local_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7452 -INFO:local_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7450 -INFO:local_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7453 -INFO:local_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7449 -INFO:local_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7449 -INFO:local_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7452 
-INFO:local_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7448 -INFO:local_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7451 -INFO:master_logger:Epoch[036/800], Step[0500/0626], Avg Loss: 0.7451 -INFO:local_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7446 -INFO:local_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7446 -INFO:local_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7446 -INFO:local_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7449 -INFO:local_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7446 -INFO:local_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7444 -INFO:local_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7450 -INFO:local_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7450 -INFO:master_logger:Epoch[036/800], Step[0600/0626], Avg Loss: 0.7447 -INFO:local_logger:----- Epoch[036/800], Train Loss: 0.7449, time: 890.54 -INFO:local_logger:Now training epoch 37. LR=0.000139 -INFO:local_logger:----- Epoch[036/800], Train Loss: 0.7445, time: 891.48 -INFO:local_logger:----- Epoch[036/800], Train Loss: 0.7445, time: 891.09 -INFO:local_logger:Now training epoch 37. LR=0.000139 -INFO:local_logger:Now training epoch 37. LR=0.000139 -INFO:local_logger:----- Epoch[036/800], Train Loss: 0.7446, time: 887.41 -INFO:master_logger:----- Epoch[036/800], Train Loss: 0.7447, time: 887.41 -INFO:local_logger:----- Epoch[036/800], Train Loss: 0.7448, time: 891.48 -INFO:local_logger:Now training epoch 37. LR=0.000139 -INFO:local_logger:----- Epoch[036/800], Train Loss: 0.7449, time: 891.12 -INFO:local_logger:Now training epoch 37. LR=0.000139 -INFO:local_logger:----- Epoch[036/800], Train Loss: 0.7446, time: 892.33 -INFO:local_logger:Now training epoch 37. LR=0.000139 -INFO:local_logger:----- Epoch[036/800], Train Loss: 0.7444, time: 891.12 -INFO:local_logger:Now training epoch 37. LR=0.000139 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-36-Loss-0.7446498155174043.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-36-Loss-0.7446498155174043.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-36-Loss-0.7446498155174043.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-36-Loss-0.7446498155174043.pdopt -INFO:local_logger:Now training epoch 37. LR=0.000139 -INFO:master_logger:Now training epoch 37. 
LR=0.000139 -INFO:local_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7435 -INFO:local_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7441 -INFO:local_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7454 -INFO:local_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7397 -INFO:master_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7456 -INFO:local_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7399 -INFO:local_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7476 -INFO:local_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7504 -INFO:local_logger:Epoch[037/800], Step[0000/0626], Avg Loss: 0.7542 -INFO:local_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7433 -INFO:local_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7414 -INFO:local_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7429 -INFO:local_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7416 -INFO:local_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7421 -INFO:master_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7426 -INFO:local_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7423 -INFO:local_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7431 -INFO:local_logger:Epoch[037/800], Step[0100/0626], Avg Loss: 0.7438 -INFO:local_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7429 -INFO:local_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7434 -INFO:local_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7425 -INFO:local_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7427 -INFO:local_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7427 -INFO:local_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7428 -INFO:local_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7429 -INFO:local_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7424 -INFO:master_logger:Epoch[037/800], Step[0200/0626], Avg Loss: 0.7428 -INFO:local_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7422 -INFO:local_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7425 -INFO:local_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7417 -INFO:local_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7424 -INFO:local_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7423 -INFO:local_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7424 -INFO:local_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7427 -INFO:local_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7425 -INFO:master_logger:Epoch[037/800], Step[0300/0626], Avg Loss: 0.7423 -INFO:local_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7423 -INFO:local_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7419 -INFO:local_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7416 -INFO:local_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7420 -INFO:local_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7422 -INFO:local_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7414 -INFO:local_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7420 -INFO:local_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7419 -INFO:master_logger:Epoch[037/800], Step[0400/0626], Avg Loss: 0.7419 -INFO:local_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7414 -INFO:local_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7414 -INFO:local_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7416 -INFO:local_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7419 -INFO:local_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7417 -INFO:local_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7419 
-INFO:local_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7415 -INFO:local_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7418 -INFO:master_logger:Epoch[037/800], Step[0500/0626], Avg Loss: 0.7417 -INFO:local_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7410 -INFO:local_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7416 -INFO:local_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7416 -INFO:local_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7417 -INFO:local_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7413 -INFO:local_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7413 -INFO:local_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7412 -INFO:master_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7414 -INFO:local_logger:Epoch[037/800], Step[0600/0626], Avg Loss: 0.7414 -INFO:local_logger:----- Epoch[037/800], Train Loss: 0.7415, time: 861.30 -INFO:local_logger:Now training epoch 38. LR=0.000143 -INFO:local_logger:----- Epoch[037/800], Train Loss: 0.7415, time: 861.30 -INFO:local_logger:Now training epoch 38. LR=0.000143 -INFO:local_logger:----- Epoch[037/800], Train Loss: 0.7414, time: 861.29 -INFO:local_logger:Now training epoch 38. LR=0.000143 -INFO:local_logger:----- Epoch[037/800], Train Loss: 0.7412, time: 861.53 -INFO:local_logger:Now training epoch 38. LR=0.000143 -INFO:local_logger:----- Epoch[037/800], Train Loss: 0.7412, time: 862.22 -INFO:local_logger:Now training epoch 38. LR=0.000143 -INFO:local_logger:----- Epoch[037/800], Train Loss: 0.7411, time: 861.67 -INFO:local_logger:Now training epoch 38. LR=0.000143 -INFO:local_logger:----- Epoch[037/800], Train Loss: 0.7410, time: 861.66 -INFO:local_logger:Now training epoch 38. LR=0.000143 -INFO:local_logger:----- Epoch[037/800], Train Loss: 0.7415, time: 858.01 -INFO:master_logger:----- Epoch[037/800], Train Loss: 0.7413, time: 858.01 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-37-Loss-0.7415279359559235.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-37-Loss-0.7415279359559235.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-37-Loss-0.7415279359559235.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-37-Loss-0.7415279359559235.pdopt -INFO:local_logger:Now training epoch 38. LR=0.000143 -INFO:master_logger:Now training epoch 38. 
LR=0.000143 -INFO:local_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7326 -INFO:local_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7470 -INFO:master_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7394 -INFO:local_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7295 -INFO:local_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7452 -INFO:local_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7429 -INFO:local_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7354 -INFO:local_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7382 -INFO:local_logger:Epoch[038/800], Step[0000/0626], Avg Loss: 0.7443 -INFO:local_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7383 -INFO:local_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7398 -INFO:local_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7393 -INFO:local_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7389 -INFO:local_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7386 -INFO:local_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7387 -INFO:master_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7392 -INFO:local_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7411 -INFO:local_logger:Epoch[038/800], Step[0100/0626], Avg Loss: 0.7392 -INFO:local_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7388 -INFO:local_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7388 -INFO:local_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7386 -INFO:local_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7386 -INFO:local_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7399 -INFO:local_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7386 -INFO:local_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7391 -INFO:master_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7389 -INFO:local_logger:Epoch[038/800], Step[0200/0626], Avg Loss: 0.7389 -INFO:local_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7388 -INFO:local_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7390 -INFO:local_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7388 -INFO:local_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7386 -INFO:local_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7388 -INFO:local_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7384 -INFO:local_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7385 -INFO:master_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7387 -INFO:local_logger:Epoch[038/800], Step[0300/0626], Avg Loss: 0.7389 -INFO:local_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7382 -INFO:local_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7388 -INFO:local_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7390 -INFO:local_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7387 -INFO:local_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7388 -INFO:master_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7387 -INFO:local_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7384 -INFO:local_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7386 -INFO:local_logger:Epoch[038/800], Step[0400/0626], Avg Loss: 0.7388 -INFO:local_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7383 -INFO:local_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7385 -INFO:local_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7385 -INFO:local_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7385 -INFO:local_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7390 -INFO:local_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7377 
-INFO:local_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7385 -INFO:master_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7385 -INFO:local_logger:Epoch[038/800], Step[0500/0626], Avg Loss: 0.7387 -INFO:local_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7378 -INFO:local_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7382 -INFO:local_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7389 -INFO:local_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7381 -INFO:local_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7381 -INFO:local_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7381 -INFO:master_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7383 -INFO:local_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7381 -INFO:local_logger:Epoch[038/800], Step[0600/0626], Avg Loss: 0.7386 -INFO:local_logger:----- Epoch[038/800], Train Loss: 0.7381, time: 895.61 -INFO:local_logger:Now training epoch 39. LR=0.000146 -INFO:local_logger:----- Epoch[038/800], Train Loss: 0.7378, time: 895.58 -INFO:local_logger:Now training epoch 39. LR=0.000146 -INFO:local_logger:----- Epoch[038/800], Train Loss: 0.7380, time: 895.99 -INFO:local_logger:Now training epoch 39. LR=0.000146 -INFO:local_logger:----- Epoch[038/800], Train Loss: 0.7381, time: 896.21 -INFO:local_logger:Now training epoch 39. LR=0.000146 -INFO:local_logger:----- Epoch[038/800], Train Loss: 0.7381, time: 892.19 -INFO:master_logger:----- Epoch[038/800], Train Loss: 0.7382, time: 892.19 -INFO:local_logger:----- Epoch[038/800], Train Loss: 0.7385, time: 895.95 -INFO:local_logger:Now training epoch 39. LR=0.000146 -INFO:local_logger:----- Epoch[038/800], Train Loss: 0.7388, time: 896.06 -INFO:local_logger:Now training epoch 39. LR=0.000146 -INFO:local_logger:----- Epoch[038/800], Train Loss: 0.7381, time: 895.94 -INFO:local_logger:Now training epoch 39. LR=0.000146 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-38-Loss-0.7380608445157859.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-38-Loss-0.7380608445157859.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-38-Loss-0.7380608445157859.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-38-Loss-0.7380608445157859.pdopt -INFO:local_logger:Now training epoch 39. LR=0.000146 -INFO:master_logger:Now training epoch 39. 
LR=0.000146 -INFO:local_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7392 -INFO:master_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7382 -INFO:local_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7373 -INFO:local_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7385 -INFO:local_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7375 -INFO:local_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7414 -INFO:local_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7378 -INFO:local_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7312 -INFO:local_logger:Epoch[039/800], Step[0000/0626], Avg Loss: 0.7426 -INFO:local_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7358 -INFO:local_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7363 -INFO:local_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7362 -INFO:local_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7361 -INFO:local_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7359 -INFO:local_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7369 -INFO:local_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7365 -INFO:master_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7363 -INFO:local_logger:Epoch[039/800], Step[0100/0626], Avg Loss: 0.7366 -INFO:local_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7366 -INFO:local_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7359 -INFO:local_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7371 -INFO:local_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7356 -INFO:local_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7361 -INFO:master_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7362 -INFO:local_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7363 -INFO:local_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7364 -INFO:local_logger:Epoch[039/800], Step[0200/0626], Avg Loss: 0.7360 -INFO:local_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7354 -INFO:local_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7356 -INFO:local_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7368 -INFO:local_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7364 -INFO:local_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7363 -INFO:master_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7360 -INFO:local_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7357 -INFO:local_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7359 -INFO:local_logger:Epoch[039/800], Step[0300/0626], Avg Loss: 0.7360 -INFO:local_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7359 -INFO:local_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7357 -INFO:local_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7355 -INFO:local_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7359 -INFO:local_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7359 -INFO:master_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7358 -INFO:local_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7365 -INFO:local_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7355 -INFO:local_logger:Epoch[039/800], Step[0400/0626], Avg Loss: 0.7356 -INFO:local_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7352 -INFO:local_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7352 -INFO:local_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7356 -INFO:local_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7355 -INFO:local_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7358 -INFO:local_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7352 
-INFO:local_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7356 -INFO:master_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7354 -INFO:local_logger:Epoch[039/800], Step[0500/0626], Avg Loss: 0.7353 -INFO:local_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7351 -INFO:local_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7351 -INFO:local_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7354 -INFO:local_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7353 -INFO:local_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7351 -INFO:local_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7356 -INFO:local_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7356 -INFO:master_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7354 -INFO:local_logger:Epoch[039/800], Step[0600/0626], Avg Loss: 0.7355 -INFO:local_logger:----- Epoch[039/800], Train Loss: 0.7351, time: 865.25 -INFO:local_logger:----- Epoch[039/800], Train Loss: 0.7350, time: 864.90 -INFO:local_logger:Now training epoch 40. LR=0.000150 -INFO:local_logger:Now training epoch 40. LR=0.000150 -INFO:local_logger:----- Epoch[039/800], Train Loss: 0.7353, time: 860.87 -INFO:master_logger:----- Epoch[039/800], Train Loss: 0.7353, time: 860.87 -INFO:local_logger:----- Epoch[039/800], Train Loss: 0.7352, time: 864.70 -INFO:local_logger:Now training epoch 40. LR=0.000150 -INFO:local_logger:----- Epoch[039/800], Train Loss: 0.7355, time: 864.66 -INFO:local_logger:Now training epoch 40. LR=0.000150 -INFO:local_logger:----- Epoch[039/800], Train Loss: 0.7355, time: 864.70 -INFO:local_logger:Now training epoch 40. LR=0.000150 -INFO:local_logger:----- Epoch[039/800], Train Loss: 0.7356, time: 864.70 -INFO:local_logger:Now training epoch 40. LR=0.000150 -INFO:local_logger:----- Epoch[039/800], Train Loss: 0.7351, time: 865.02 -INFO:local_logger:Now training epoch 40. LR=0.000150 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-39-Loss-0.7353187804344304.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-39-Loss-0.7353187804344304.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-39-Loss-0.7353187804344304.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-39-Loss-0.7353187804344304.pdopt -INFO:local_logger:Now training epoch 40. LR=0.000150 -INFO:master_logger:Now training epoch 40. 
LR=0.000150 -INFO:local_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7343 -INFO:local_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7310 -INFO:local_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7444 -INFO:local_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7305 -INFO:master_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7355 -INFO:local_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7357 -INFO:local_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7310 -INFO:local_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7439 -INFO:local_logger:Epoch[040/800], Step[0000/0626], Avg Loss: 0.7330 -INFO:local_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7348 -INFO:local_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7344 -INFO:local_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7346 -INFO:local_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7348 -INFO:master_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7341 -INFO:local_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7345 -INFO:local_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7336 -INFO:local_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7331 -INFO:local_logger:Epoch[040/800], Step[0100/0626], Avg Loss: 0.7333 -INFO:local_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7340 -INFO:local_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7343 -INFO:local_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7335 -INFO:local_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7340 -INFO:local_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7335 -INFO:local_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7334 -INFO:local_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7326 -INFO:master_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7336 -INFO:local_logger:Epoch[040/800], Step[0200/0626], Avg Loss: 0.7339 -INFO:local_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7338 -INFO:local_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7336 -INFO:local_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7331 -INFO:local_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7338 -INFO:local_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7332 -INFO:master_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7334 -INFO:local_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7329 -INFO:local_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7334 -INFO:local_logger:Epoch[040/800], Step[0300/0626], Avg Loss: 0.7338 -INFO:local_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7335 -INFO:local_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7335 -INFO:local_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7336 -INFO:local_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7334 -INFO:local_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7328 -INFO:local_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7332 -INFO:master_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7333 -INFO:local_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7332 -INFO:local_logger:Epoch[040/800], Step[0400/0626], Avg Loss: 0.7330 -INFO:local_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7330 -INFO:local_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7323 -INFO:local_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7333 -INFO:local_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7333 -INFO:local_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7329 -INFO:local_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7334 
-INFO:local_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7330 -INFO:local_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7329 -INFO:master_logger:Epoch[040/800], Step[0500/0626], Avg Loss: 0.7330 -INFO:local_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7329 -INFO:local_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7329 -INFO:local_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7328 -INFO:local_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7329 -INFO:local_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7328 -INFO:local_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7329 -INFO:local_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7322 -INFO:master_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7328 -INFO:local_logger:Epoch[040/800], Step[0600/0626], Avg Loss: 0.7328 -INFO:local_logger:----- Epoch[040/800], Train Loss: 0.7328, time: 895.01 -INFO:local_logger:Now training epoch 41. LR=0.000150 -INFO:local_logger:----- Epoch[040/800], Train Loss: 0.7328, time: 890.99 -INFO:local_logger:----- Epoch[040/800], Train Loss: 0.7328, time: 895.12 -INFO:master_logger:----- Epoch[040/800], Train Loss: 0.7327, time: 890.99 -INFO:local_logger:Now training epoch 41. LR=0.000150 -INFO:local_logger:----- Epoch[040/800], Train Loss: 0.7328, time: 895.13 -INFO:local_logger:Now training epoch 41. LR=0.000150 -INFO:local_logger:----- Epoch[040/800], Train Loss: 0.7328, time: 894.99 -INFO:local_logger:Now training epoch 41. LR=0.000150 -INFO:local_logger:----- Epoch[040/800], Train Loss: 0.7328, time: 894.98 -INFO:local_logger:Now training epoch 41. LR=0.000150 -INFO:local_logger:----- Epoch[040/800], Train Loss: 0.7321, time: 894.99 -INFO:local_logger:Now training epoch 41. LR=0.000150 -INFO:local_logger:----- Epoch[040/800], Train Loss: 0.7327, time: 895.07 -INFO:local_logger:Now training epoch 41. LR=0.000150 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-40-Loss-0.7327702230552797.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-40-Loss-0.7327702230552797.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-40-Loss-0.7327702230552797.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-40-Loss-0.7327702230552797.pdopt -INFO:local_logger:Now training epoch 41. LR=0.000150 -INFO:master_logger:Now training epoch 41. 
LR=0.000150 -INFO:local_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7292 -INFO:master_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7311 -INFO:local_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7233 -INFO:local_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7219 -INFO:local_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7395 -INFO:local_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7295 -INFO:local_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7279 -INFO:local_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7431 -INFO:local_logger:Epoch[041/800], Step[0000/0626], Avg Loss: 0.7341 -INFO:local_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7312 -INFO:local_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7315 -INFO:local_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7304 -INFO:local_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7317 -INFO:local_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7307 -INFO:master_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7311 -INFO:local_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7308 -INFO:local_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7302 -INFO:local_logger:Epoch[041/800], Step[0100/0626], Avg Loss: 0.7321 -INFO:local_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7308 -INFO:local_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7315 -INFO:local_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7308 -INFO:local_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7317 -INFO:local_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7303 -INFO:local_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7313 -INFO:master_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7310 -INFO:local_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7309 -INFO:local_logger:Epoch[041/800], Step[0200/0626], Avg Loss: 0.7308 -INFO:local_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7310 -INFO:local_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7307 -INFO:local_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7304 -INFO:local_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7307 -INFO:master_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7308 -INFO:local_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7310 -INFO:local_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7308 -INFO:local_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7312 -INFO:local_logger:Epoch[041/800], Step[0300/0626], Avg Loss: 0.7304 -INFO:local_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7312 -INFO:local_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7309 -INFO:local_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7301 -INFO:local_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7305 -INFO:local_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7305 -INFO:master_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7306 -INFO:local_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7307 -INFO:local_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7303 -INFO:local_logger:Epoch[041/800], Step[0400/0626], Avg Loss: 0.7307 -INFO:local_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7303 -INFO:local_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7302 -INFO:local_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7306 -INFO:local_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7305 -INFO:local_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7300 -INFO:local_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7306 
-INFO:local_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7304 -INFO:local_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7304 -INFO:master_logger:Epoch[041/800], Step[0500/0626], Avg Loss: 0.7304 -INFO:local_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7303 -INFO:local_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7304 -INFO:local_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7301 -INFO:local_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7299 -INFO:local_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7302 -INFO:master_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7301 -INFO:local_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7299 -INFO:local_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7302 -INFO:local_logger:Epoch[041/800], Step[0600/0626], Avg Loss: 0.7301 -INFO:local_logger:----- Epoch[041/800], Train Loss: 0.7305, time: 866.86 -INFO:local_logger:Now training epoch 42. LR=0.000150 -INFO:local_logger:----- Epoch[041/800], Train Loss: 0.7300, time: 866.90 -INFO:local_logger:Now training epoch 42. LR=0.000150 -INFO:local_logger:----- Epoch[041/800], Train Loss: 0.7304, time: 867.40 -INFO:local_logger:Now training epoch 42. LR=0.000150 -INFO:local_logger:----- Epoch[041/800], Train Loss: 0.7303, time: 863.76 -INFO:master_logger:----- Epoch[041/800], Train Loss: 0.7302, time: 863.76 -INFO:local_logger:----- Epoch[041/800], Train Loss: 0.7301, time: 867.52 -INFO:local_logger:Now training epoch 42. LR=0.000150 -INFO:local_logger:----- Epoch[041/800], Train Loss: 0.7303, time: 867.54 -INFO:local_logger:Now training epoch 42. LR=0.000150 -INFO:local_logger:----- Epoch[041/800], Train Loss: 0.7302, time: 867.65 -INFO:local_logger:Now training epoch 42. LR=0.000150 -INFO:local_logger:----- Epoch[041/800], Train Loss: 0.7300, time: 867.66 -INFO:local_logger:Now training epoch 42. LR=0.000150 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-41-Loss-0.7302703064055491.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-41-Loss-0.7302703064055491.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-41-Loss-0.7302703064055491.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-41-Loss-0.7302703064055491.pdopt -INFO:local_logger:Now training epoch 42. LR=0.000150 -INFO:master_logger:Now training epoch 42. 
LR=0.000150 -INFO:local_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7300 -INFO:local_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7398 -INFO:master_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7281 -INFO:local_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7322 -INFO:local_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7185 -INFO:local_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7206 -INFO:local_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7231 -INFO:local_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7259 -INFO:local_logger:Epoch[042/800], Step[0000/0626], Avg Loss: 0.7347 -INFO:local_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7299 -INFO:local_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7279 -INFO:local_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7292 -INFO:local_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7300 -INFO:local_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7287 -INFO:local_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7292 -INFO:master_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7291 -INFO:local_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7283 -INFO:local_logger:Epoch[042/800], Step[0100/0626], Avg Loss: 0.7297 -INFO:local_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7285 -INFO:local_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7286 -INFO:local_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7290 -INFO:local_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7289 -INFO:local_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7292 -INFO:local_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7282 -INFO:master_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7286 -INFO:local_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7284 -INFO:local_logger:Epoch[042/800], Step[0200/0626], Avg Loss: 0.7283 -INFO:local_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7283 -INFO:local_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7288 -INFO:local_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7283 -INFO:local_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7277 -INFO:local_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7284 -INFO:local_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7286 -INFO:master_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7285 -INFO:local_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7287 -INFO:local_logger:Epoch[042/800], Step[0300/0626], Avg Loss: 0.7288 -INFO:local_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7279 -INFO:local_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7285 -INFO:local_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7287 -INFO:local_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7288 -INFO:local_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7283 -INFO:local_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7275 -INFO:local_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7284 -INFO:master_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7283 -INFO:local_logger:Epoch[042/800], Step[0400/0626], Avg Loss: 0.7279 -INFO:local_logger:Epoch[042/800], Step[0500/0626], Avg Loss: 0.7278 -INFO:local_logger:Epoch[042/800], Step[0500/0626], Avg Loss: 0.7284 -INFO:local_logger:Epoch[042/800], Step[0500/0626], Avg Loss: 0.7285 -INFO:local_logger:Epoch[042/800], Step[0500/0626], Avg Loss: 0.7284 -INFO:local_logger:Epoch[042/800], Step[0500/0626], Avg Loss: 0.7277 -INFO:local_logger:Epoch[042/800], Step[0500/0626], Avg Loss: 0.7279 
[... nohup.out deletion continues: removed per-step and per-epoch "Avg Loss" reports emitted by local_logger/master_logger during MAE pretraining, covering epochs 042-054 (626 steps per epoch, 8 workers, LR=0.000150, average train loss decreasing from ~0.728 at epoch 042 to ~0.709 at epoch 054, still in progress), together with the per-epoch model/optimizer checkpoint save messages under ./output/train-20211219-17-07-40/ ...]
-INFO:local_logger:Epoch[054/800], Step[0500/0626], Avg Loss: 0.7090 -INFO:master_logger:Epoch[054/800], Step[0500/0626], Avg Loss: 0.7086 -INFO:local_logger:Epoch[054/800], Step[0500/0626], Avg Loss: 0.7083 -INFO:local_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7083 -INFO:local_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7088 -INFO:local_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7088 -INFO:local_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7085 -INFO:local_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7087 -INFO:master_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7085 -INFO:local_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7088 -INFO:local_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7081 -INFO:local_logger:Epoch[054/800], Step[0600/0626], Avg Loss: 0.7081 -INFO:local_logger:----- Epoch[054/800], Train Loss: 0.7087, time: 903.80 -INFO:local_logger:Now training epoch 55. LR=0.000150 -INFO:local_logger:----- Epoch[054/800], Train Loss: 0.7082, time: 903.85 -INFO:local_logger:Now training epoch 55. LR=0.000150 -INFO:local_logger:----- Epoch[054/800], Train Loss: 0.7083, time: 904.37 -INFO:local_logger:Now training epoch 55. LR=0.000150 -INFO:local_logger:----- Epoch[054/800], Train Loss: 0.7081, time: 904.37 -INFO:local_logger:Now training epoch 55. LR=0.000150 -INFO:local_logger:----- Epoch[054/800], Train Loss: 0.7085, time: 904.35 -INFO:local_logger:Now training epoch 55. LR=0.000150 -INFO:local_logger:----- Epoch[054/800], Train Loss: 0.7087, time: 904.43 -INFO:local_logger:Now training epoch 55. LR=0.000150 -INFO:local_logger:----- Epoch[054/800], Train Loss: 0.7088, time: 900.86 -INFO:master_logger:----- Epoch[054/800], Train Loss: 0.7085, time: 900.86 -INFO:local_logger:----- Epoch[054/800], Train Loss: 0.7087, time: 904.47 -INFO:local_logger:Now training epoch 55. LR=0.000150 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-54-Loss-0.7087632936406654.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-54-Loss-0.7087632936406654.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-54-Loss-0.7087632936406654.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-54-Loss-0.7087632936406654.pdopt -INFO:local_logger:Now training epoch 55. LR=0.000150 -INFO:master_logger:Now training epoch 55. 
LR=0.000150 -INFO:local_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.6897 -INFO:master_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.7057 -INFO:local_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.7023 -INFO:local_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.6937 -INFO:local_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.7040 -INFO:local_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.7203 -INFO:local_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.7165 -INFO:local_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.7079 -INFO:local_logger:Epoch[055/800], Step[0000/0626], Avg Loss: 0.7108 -INFO:local_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7093 -INFO:local_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7084 -INFO:local_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7081 -INFO:local_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7084 -INFO:master_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7083 -INFO:local_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7082 -INFO:local_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7082 -INFO:local_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7078 -INFO:local_logger:Epoch[055/800], Step[0100/0626], Avg Loss: 0.7078 -INFO:local_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7086 -INFO:local_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7080 -INFO:local_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7086 -INFO:master_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7081 -INFO:local_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7088 -INFO:local_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7078 -INFO:local_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7078 -INFO:local_logger:Epoch[055/800], Step[0200/0626], Avg Loss: 0.7077 -INFO:local_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7071 -INFO:local_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7086 -INFO:local_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7076 -INFO:master_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7080 -INFO:local_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7086 -INFO:local_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7078 -INFO:local_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7079 -INFO:local_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7079 -INFO:local_logger:Epoch[055/800], Step[0300/0626], Avg Loss: 0.7083 -INFO:local_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7073 -INFO:local_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7081 -INFO:local_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7079 -INFO:local_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7076 -INFO:local_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7078 -INFO:local_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7087 -INFO:master_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7079 -INFO:local_logger:Epoch[055/800], Step[0400/0626], Avg Loss: 0.7081 -INFO:local_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7087 -INFO:local_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7077 -INFO:local_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7082 -INFO:local_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7076 -INFO:local_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7073 
-INFO:master_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7078 -INFO:local_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7079 -INFO:local_logger:Epoch[055/800], Step[0500/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7073 -INFO:local_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7081 -INFO:local_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7072 -INFO:local_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7084 -INFO:master_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7076 -INFO:local_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7077 -INFO:local_logger:Epoch[055/800], Step[0600/0626], Avg Loss: 0.7074 -INFO:local_logger:----- Epoch[055/800], Train Loss: 0.7081, time: 859.40 -INFO:local_logger:Now training epoch 56. LR=0.000150 -INFO:local_logger:----- Epoch[055/800], Train Loss: 0.7074, time: 855.79 -INFO:master_logger:----- Epoch[055/800], Train Loss: 0.7076, time: 855.79 -INFO:local_logger:----- Epoch[055/800], Train Loss: 0.7073, time: 859.94 -INFO:local_logger:Now training epoch 56. LR=0.000150 -INFO:local_logger:----- Epoch[055/800], Train Loss: 0.7073, time: 859.85 -INFO:local_logger:Now training epoch 56. LR=0.000150 -INFO:local_logger:----- Epoch[055/800], Train Loss: 0.7072, time: 860.45 -INFO:local_logger:Now training epoch 56. LR=0.000150 -INFO:local_logger:----- Epoch[055/800], Train Loss: 0.7074, time: 859.97 -INFO:local_logger:Now training epoch 56. LR=0.000150 -INFO:local_logger:----- Epoch[055/800], Train Loss: 0.7077, time: 859.89 -INFO:local_logger:Now training epoch 56. LR=0.000150 -INFO:local_logger:----- Epoch[055/800], Train Loss: 0.7083, time: 860.52 -INFO:local_logger:Now training epoch 56. LR=0.000150 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-55-Loss-0.7074442110429905.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-55-Loss-0.7074442110429905.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-55-Loss-0.7074442110429905.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-55-Loss-0.7074442110429905.pdopt -INFO:local_logger:Now training epoch 56. LR=0.000150 -INFO:master_logger:Now training epoch 56. 
LR=0.000150 -INFO:local_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.6969 -INFO:local_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.7058 -INFO:local_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.7068 -INFO:master_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.7070 -INFO:local_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.7114 -INFO:local_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.7124 -INFO:local_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.6985 -INFO:local_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.7150 -INFO:local_logger:Epoch[056/800], Step[0000/0626], Avg Loss: 0.7089 -INFO:local_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7066 -INFO:local_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7079 -INFO:local_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7056 -INFO:master_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7069 -INFO:local_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7066 -INFO:local_logger:Epoch[056/800], Step[0100/0626], Avg Loss: 0.7070 -INFO:local_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7071 -INFO:local_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7066 -INFO:local_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7069 -INFO:local_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7075 -INFO:local_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7069 -INFO:local_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7069 -INFO:local_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7069 -INFO:local_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7059 -INFO:master_logger:Epoch[056/800], Step[0200/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7077 -INFO:local_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7064 -INFO:local_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7067 -INFO:local_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7067 -INFO:local_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7059 -INFO:local_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7065 -INFO:master_logger:Epoch[056/800], Step[0300/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7060 -INFO:local_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7065 -INFO:local_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7066 -INFO:local_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7069 -INFO:local_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7073 -INFO:local_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7072 -INFO:master_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7067 -INFO:local_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7063 -INFO:local_logger:Epoch[056/800], Step[0400/0626], Avg Loss: 0.7067 -INFO:local_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7063 -INFO:local_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7069 -INFO:local_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7069 -INFO:master_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7066 -INFO:local_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7067 
-INFO:local_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[056/800], Step[0500/0626], Avg Loss: 0.7066 -INFO:local_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7066 -INFO:local_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7069 -INFO:local_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7063 -INFO:local_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7061 -INFO:master_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7065 -INFO:local_logger:Epoch[056/800], Step[0600/0626], Avg Loss: 0.7065 -INFO:local_logger:----- Epoch[056/800], Train Loss: 0.7067, time: 890.40 -INFO:local_logger:Now training epoch 57. LR=0.000150 -INFO:local_logger:----- Epoch[056/800], Train Loss: 0.7065, time: 890.61 -INFO:local_logger:Now training epoch 57. LR=0.000150 -INFO:local_logger:----- Epoch[056/800], Train Loss: 0.7066, time: 890.94 -INFO:local_logger:Now training epoch 57. LR=0.000150 -INFO:local_logger:----- Epoch[056/800], Train Loss: 0.7068, time: 891.55 -INFO:local_logger:----- Epoch[056/800], Train Loss: 0.7063, time: 890.99 -INFO:local_logger:Now training epoch 57. LR=0.000150 -INFO:local_logger:----- Epoch[056/800], Train Loss: 0.7060, time: 891.01 -INFO:local_logger:Now training epoch 57. LR=0.000150 -INFO:local_logger:Now training epoch 57. LR=0.000150 -INFO:local_logger:----- Epoch[056/800], Train Loss: 0.7063, time: 887.67 -INFO:master_logger:----- Epoch[056/800], Train Loss: 0.7065, time: 887.67 -INFO:local_logger:----- Epoch[056/800], Train Loss: 0.7069, time: 890.98 -INFO:local_logger:Now training epoch 57. LR=0.000150 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-56-Loss-0.7062517588045653.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-56-Loss-0.7062517588045653.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-56-Loss-0.7062517588045653.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-56-Loss-0.7062517588045653.pdopt -INFO:local_logger:Now training epoch 57. LR=0.000150 -INFO:master_logger:Now training epoch 57. 
LR=0.000150 -INFO:local_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.7140 -INFO:local_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.6994 -INFO:local_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.7131 -INFO:master_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.7096 -INFO:local_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.7112 -INFO:local_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.7165 -INFO:local_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.7022 -INFO:local_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.7034 -INFO:local_logger:Epoch[057/800], Step[0000/0626], Avg Loss: 0.7171 -INFO:local_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7063 -INFO:local_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7057 -INFO:local_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7054 -INFO:local_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7067 -INFO:local_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7053 -INFO:local_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7065 -INFO:master_logger:Epoch[057/800], Step[0100/0626], Avg Loss: 0.7061 -INFO:local_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7056 -INFO:local_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7067 -INFO:local_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7058 -INFO:local_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7057 -INFO:local_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7051 -INFO:master_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7059 -INFO:local_logger:Epoch[057/800], Step[0200/0626], Avg Loss: 0.7061 -INFO:local_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7056 -INFO:local_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7059 -INFO:local_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7052 -INFO:local_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7058 -INFO:local_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7054 -INFO:master_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7057 -INFO:local_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7054 -INFO:local_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7063 -INFO:local_logger:Epoch[057/800], Step[0300/0626], Avg Loss: 0.7061 -INFO:local_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7058 -INFO:local_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7060 -INFO:local_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7052 -INFO:local_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7059 -INFO:local_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7056 -INFO:local_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7063 -INFO:local_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7055 -INFO:master_logger:Epoch[057/800], Step[0400/0626], Avg Loss: 0.7058 -INFO:local_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7054 -INFO:local_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7050 -INFO:local_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7059 -INFO:local_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7055 -INFO:local_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7061 
-INFO:local_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7057 -INFO:local_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7059 -INFO:master_logger:Epoch[057/800], Step[0500/0626], Avg Loss: 0.7057 -INFO:local_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7051 -INFO:local_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7058 -INFO:local_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7054 -INFO:local_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7054 -INFO:local_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7057 -INFO:local_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7062 -INFO:local_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7056 -INFO:master_logger:Epoch[057/800], Step[0600/0626], Avg Loss: 0.7057 -INFO:local_logger:----- Epoch[057/800], Train Loss: 0.7060, time: 853.51 -INFO:local_logger:Now training epoch 58. LR=0.000150 -INFO:local_logger:----- Epoch[057/800], Train Loss: 0.7053, time: 853.54 -INFO:local_logger:Now training epoch 58. LR=0.000150 -INFO:local_logger:----- Epoch[057/800], Train Loss: 0.7057, time: 854.16 -INFO:local_logger:Now training epoch 58. LR=0.000150 -INFO:local_logger:----- Epoch[057/800], Train Loss: 0.7050, time: 854.00 -INFO:local_logger:----- Epoch[057/800], Train Loss: 0.7061, time: 854.36 -INFO:local_logger:Now training epoch 58. LR=0.000150 -INFO:local_logger:Now training epoch 58. LR=0.000150 -INFO:local_logger:----- Epoch[057/800], Train Loss: 0.7056, time: 849.91 -INFO:local_logger:----- Epoch[057/800], Train Loss: 0.7054, time: 853.96 -INFO:master_logger:----- Epoch[057/800], Train Loss: 0.7056, time: 849.91 -INFO:local_logger:Now training epoch 58. LR=0.000150 -INFO:local_logger:----- Epoch[057/800], Train Loss: 0.7056, time: 853.96 -INFO:local_logger:Now training epoch 58. LR=0.000150 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-57-Loss-0.70561545900947.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-57-Loss-0.70561545900947.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-57-Loss-0.70561545900947.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-57-Loss-0.70561545900947.pdopt -INFO:local_logger:Now training epoch 58. LR=0.000150 -INFO:master_logger:Now training epoch 58. 
LR=0.000150 -INFO:local_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.7087 -INFO:local_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.6950 -INFO:master_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.7032 -INFO:local_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.7035 -INFO:local_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.7008 -INFO:local_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.6990 -INFO:local_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.7091 -INFO:local_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.7010 -INFO:local_logger:Epoch[058/800], Step[0000/0626], Avg Loss: 0.7083 -INFO:local_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7057 -INFO:local_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7047 -INFO:local_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7033 -INFO:local_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7035 -INFO:local_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7044 -INFO:local_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7042 -INFO:local_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7048 -INFO:local_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7051 -INFO:master_logger:Epoch[058/800], Step[0100/0626], Avg Loss: 0.7045 -INFO:local_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7056 -INFO:local_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7048 -INFO:local_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7050 -INFO:local_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7052 -INFO:local_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7048 -INFO:master_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7048 -INFO:local_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7051 -INFO:local_logger:Epoch[058/800], Step[0200/0626], Avg Loss: 0.7032 -INFO:local_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7048 -INFO:local_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7052 -INFO:local_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7051 -INFO:local_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7046 -INFO:master_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7047 -INFO:local_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7049 -INFO:local_logger:Epoch[058/800], Step[0300/0626], Avg Loss: 0.7035 -INFO:local_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7047 -INFO:local_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7047 -INFO:local_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7048 -INFO:local_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7035 -INFO:local_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7048 -INFO:master_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7047 -INFO:local_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0400/0626], Avg Loss: 0.7047 -INFO:local_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7048 -INFO:local_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7043 
-INFO:local_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7034 -INFO:master_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7044 -INFO:local_logger:Epoch[058/800], Step[0500/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7045 -INFO:local_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7045 -INFO:local_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7046 -INFO:master_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7044 -INFO:local_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7045 -INFO:local_logger:Epoch[058/800], Step[0600/0626], Avg Loss: 0.7042 -INFO:local_logger:----- Epoch[058/800], Train Loss: 0.7045, time: 883.86 -INFO:master_logger:----- Epoch[058/800], Train Loss: 0.7044, time: 883.86 -INFO:local_logger:----- Epoch[058/800], Train Loss: 0.7044, time: 888.09 -INFO:local_logger:Now training epoch 59. LR=0.000151 -INFO:local_logger:----- Epoch[058/800], Train Loss: 0.7043, time: 888.09 -INFO:local_logger:Now training epoch 59. LR=0.000151 -INFO:local_logger:----- Epoch[058/800], Train Loss: 0.7045, time: 887.67 -INFO:local_logger:Now training epoch 59. LR=0.000151 -INFO:local_logger:----- Epoch[058/800], Train Loss: 0.7036, time: 887.78 -INFO:local_logger:Now training epoch 59. LR=0.000151 -INFO:local_logger:----- Epoch[058/800], Train Loss: 0.7046, time: 888.37 -INFO:local_logger:Now training epoch 59. LR=0.000151 -INFO:local_logger:----- Epoch[058/800], Train Loss: 0.7045, time: 887.96 -INFO:local_logger:Now training epoch 59. LR=0.000151 -INFO:local_logger:----- Epoch[058/800], Train Loss: 0.7046, time: 887.96 -INFO:local_logger:Now training epoch 59. LR=0.000151 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-58-Loss-0.704508643255555.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-58-Loss-0.704508643255555.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-58-Loss-0.704508643255555.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-58-Loss-0.704508643255555.pdopt -INFO:local_logger:Now training epoch 59. LR=0.000151 -INFO:master_logger:Now training epoch 59. 
LR=0.000151 -INFO:local_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.6945 -INFO:master_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.7041 -INFO:local_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.7028 -INFO:local_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.7075 -INFO:local_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.7132 -INFO:local_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.7134 -INFO:local_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.7138 -INFO:local_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.6925 -INFO:local_logger:Epoch[059/800], Step[0000/0626], Avg Loss: 0.6949 -INFO:local_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7022 -INFO:local_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7047 -INFO:local_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7039 -INFO:local_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7043 -INFO:master_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7041 -INFO:local_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7048 -INFO:local_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7046 -INFO:local_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7044 -INFO:local_logger:Epoch[059/800], Step[0100/0626], Avg Loss: 0.7034 -INFO:local_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7040 -INFO:local_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7040 -INFO:local_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7041 -INFO:local_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7032 -INFO:local_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7039 -INFO:master_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7037 -INFO:local_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7021 -INFO:local_logger:Epoch[059/800], Step[0200/0626], Avg Loss: 0.7043 -INFO:local_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7041 -INFO:local_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7040 -INFO:master_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7033 -INFO:local_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7027 -INFO:local_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7040 -INFO:local_logger:Epoch[059/800], Step[0300/0626], Avg Loss: 0.7034 -INFO:local_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7037 -INFO:local_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7040 -INFO:local_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7029 -INFO:local_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7037 -INFO:local_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7035 -INFO:local_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7033 -INFO:local_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7039 -INFO:master_logger:Epoch[059/800], Step[0400/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7037 -INFO:local_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7034 -INFO:local_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7039 -INFO:local_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7031 -INFO:local_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7037 
-INFO:master_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7035 -INFO:local_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7031 -INFO:local_logger:Epoch[059/800], Step[0500/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7035 -INFO:local_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7037 -INFO:local_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7035 -INFO:local_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7034 -INFO:master_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7034 -INFO:local_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7037 -INFO:local_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7034 -INFO:local_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7032 -INFO:local_logger:Epoch[059/800], Step[0600/0626], Avg Loss: 0.7030 -INFO:local_logger:----- Epoch[059/800], Train Loss: 0.7034, time: 855.63 -INFO:master_logger:----- Epoch[059/800], Train Loss: 0.7034, time: 855.63 -INFO:local_logger:----- Epoch[059/800], Train Loss: 0.7034, time: 859.41 -INFO:local_logger:Now training epoch 60. LR=0.000151 -INFO:local_logger:----- Epoch[059/800], Train Loss: 0.7035, time: 859.16 -INFO:local_logger:Now training epoch 60. LR=0.000151 -INFO:local_logger:----- Epoch[059/800], Train Loss: 0.7030, time: 859.83 -INFO:local_logger:Now training epoch 60. LR=0.000151 -INFO:local_logger:----- Epoch[059/800], Train Loss: 0.7033, time: 859.66 -INFO:local_logger:Now training epoch 60. LR=0.000151 -INFO:local_logger:----- Epoch[059/800], Train Loss: 0.7036, time: 859.94 -INFO:local_logger:Now training epoch 60. LR=0.000151 -INFO:local_logger:----- Epoch[059/800], Train Loss: 0.7037, time: 859.65 -INFO:local_logger:Now training epoch 60. LR=0.000151 -INFO:local_logger:----- Epoch[059/800], Train Loss: 0.7032, time: 859.97 -INFO:local_logger:Now training epoch 60. LR=0.000151 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-59-Loss-0.7033895635093321.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-59-Loss-0.7033895635093321.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-59-Loss-0.7033895635093321.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-59-Loss-0.7033895635093321.pdopt -INFO:local_logger:Now training epoch 60. LR=0.000151 -INFO:master_logger:Now training epoch 60. 
LR=0.000151 -INFO:local_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.7205 -INFO:local_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.7122 -INFO:local_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.7150 -INFO:master_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.7093 -INFO:local_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.7005 -INFO:local_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.6991 -INFO:local_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.7068 -INFO:local_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.7212 -INFO:local_logger:Epoch[060/800], Step[0000/0626], Avg Loss: 0.6992 -INFO:local_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7019 -INFO:local_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7031 -INFO:local_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7024 -INFO:local_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7027 -INFO:local_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7024 -INFO:local_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7041 -INFO:master_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7029 -INFO:local_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7026 -INFO:local_logger:Epoch[060/800], Step[0100/0626], Avg Loss: 0.7037 -INFO:local_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7033 -INFO:local_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7022 -INFO:local_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7043 -INFO:local_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7025 -INFO:local_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7025 -INFO:local_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7030 -INFO:master_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7029 -INFO:local_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7025 -INFO:local_logger:Epoch[060/800], Step[0200/0626], Avg Loss: 0.7033 -INFO:local_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7030 -INFO:local_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7022 -INFO:local_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7026 -INFO:local_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7029 -INFO:local_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7034 -INFO:master_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7028 -INFO:local_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7027 -INFO:local_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7025 -INFO:local_logger:Epoch[060/800], Step[0300/0626], Avg Loss: 0.7030 -INFO:local_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7024 -INFO:local_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7024 -INFO:local_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7025 -INFO:local_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7026 -INFO:local_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7031 -INFO:local_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7029 -INFO:master_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7026 -INFO:local_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7028 -INFO:local_logger:Epoch[060/800], Step[0400/0626], Avg Loss: 0.7023 -INFO:local_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7026 -INFO:local_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7023 -INFO:local_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7029 -INFO:local_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7024 -INFO:local_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7029 -INFO:local_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7025 
-INFO:local_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7028 -INFO:local_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7022 -INFO:master_logger:Epoch[060/800], Step[0500/0626], Avg Loss: 0.7026 -INFO:local_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7026 -INFO:local_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7029 -INFO:local_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7025 -INFO:master_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7025 -INFO:local_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7023 -INFO:local_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7021 -INFO:local_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7023 -INFO:local_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7024 -INFO:local_logger:Epoch[060/800], Step[0600/0626], Avg Loss: 0.7028 -INFO:local_logger:----- Epoch[060/800], Train Loss: 0.7020, time: 883.22 -INFO:local_logger:Now training epoch 61. LR=0.000151 -INFO:local_logger:----- Epoch[060/800], Train Loss: 0.7027, time: 884.34 -INFO:local_logger:Now training epoch 61. LR=0.000151 -INFO:local_logger:----- Epoch[060/800], Train Loss: 0.7025, time: 883.83 -INFO:local_logger:Now training epoch 61. LR=0.000151 -INFO:local_logger:----- Epoch[060/800], Train Loss: 0.7022, time: 883.86 -INFO:local_logger:Now training epoch 61. LR=0.000151 -INFO:local_logger:----- Epoch[060/800], Train Loss: 0.7030, time: 883.87 -INFO:local_logger:Now training epoch 61. LR=0.000151 -INFO:local_logger:----- Epoch[060/800], Train Loss: 0.7023, time: 883.92 -INFO:local_logger:Now training epoch 61. LR=0.000151 -INFO:local_logger:----- Epoch[060/800], Train Loss: 0.7024, time: 883.95 -INFO:local_logger:Now training epoch 61. LR=0.000151 -INFO:local_logger:----- Epoch[060/800], Train Loss: 0.7025, time: 880.74 -INFO:master_logger:----- Epoch[060/800], Train Loss: 0.7024, time: 880.74 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-60-Loss-0.7024735177213701.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-60-Loss-0.7024735177213701.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-60-Loss-0.7024735177213701.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-60-Loss-0.7024735177213701.pdopt -INFO:local_logger:Now training epoch 61. LR=0.000151 -INFO:master_logger:Now training epoch 61. 
LR=0.000151 -INFO:local_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.6994 -INFO:local_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.6940 -INFO:master_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.6945 -INFO:local_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.6854 -INFO:local_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.6741 -INFO:local_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.7025 -INFO:local_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.7017 -INFO:local_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.6991 -INFO:local_logger:Epoch[061/800], Step[0000/0626], Avg Loss: 0.7000 -INFO:local_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7019 -INFO:local_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7020 -INFO:local_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7019 -INFO:local_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7011 -INFO:local_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7002 -INFO:local_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7016 -INFO:master_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7018 -INFO:local_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7024 -INFO:local_logger:Epoch[061/800], Step[0100/0626], Avg Loss: 0.7036 -INFO:local_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7020 -INFO:local_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7028 -INFO:local_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7020 -INFO:local_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7015 -INFO:local_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7027 -INFO:local_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7022 -INFO:local_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7017 -INFO:local_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7011 -INFO:master_logger:Epoch[061/800], Step[0200/0626], Avg Loss: 0.7020 -INFO:local_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7016 -INFO:local_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7010 -INFO:local_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7024 -INFO:local_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7012 -INFO:local_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7020 -INFO:local_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7020 -INFO:master_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7018 -INFO:local_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7017 -INFO:local_logger:Epoch[061/800], Step[0300/0626], Avg Loss: 0.7026 -INFO:local_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7012 -INFO:local_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7012 -INFO:master_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7018 -INFO:local_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7019 -INFO:local_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7021 -INFO:local_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7020 -INFO:local_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7022 -INFO:local_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7016 -INFO:local_logger:Epoch[061/800], Step[0400/0626], Avg Loss: 0.7019 -INFO:local_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7012 -INFO:local_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7013 -INFO:local_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7017 -INFO:local_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7019 -INFO:local_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7022 -INFO:local_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7022 
-INFO:master_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7017 -INFO:local_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7018 -INFO:local_logger:Epoch[061/800], Step[0500/0626], Avg Loss: 0.7015 -INFO:local_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7021 -INFO:local_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7013 -INFO:local_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7018 -INFO:local_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7013 -INFO:local_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7012 -INFO:local_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7015 -INFO:local_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7014 -INFO:master_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7016 -INFO:local_logger:Epoch[061/800], Step[0600/0626], Avg Loss: 0.7019 -INFO:local_logger:----- Epoch[061/800], Train Loss: 0.7015, time: 862.93 -INFO:local_logger:Now training epoch 62. LR=0.000151 -INFO:local_logger:----- Epoch[061/800], Train Loss: 0.7014, time: 863.80 -INFO:local_logger:Now training epoch 62. LR=0.000151 -INFO:local_logger:----- Epoch[061/800], Train Loss: 0.7013, time: 863.80 -INFO:local_logger:Now training epoch 62. LR=0.000151 -INFO:local_logger:----- Epoch[061/800], Train Loss: 0.7012, time: 860.38 -INFO:master_logger:----- Epoch[061/800], Train Loss: 0.7015, time: 860.38 -INFO:local_logger:----- Epoch[061/800], Train Loss: 0.7017, time: 864.16 -INFO:local_logger:Now training epoch 62. LR=0.000151 -INFO:local_logger:----- Epoch[061/800], Train Loss: 0.7012, time: 864.25 -INFO:local_logger:Now training epoch 62. LR=0.000151 -INFO:local_logger:----- Epoch[061/800], Train Loss: 0.7019, time: 865.44 -INFO:local_logger:Now training epoch 62. LR=0.000151 -INFO:local_logger:----- Epoch[061/800], Train Loss: 0.7021, time: 864.25 -INFO:local_logger:Now training epoch 62. LR=0.000151 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-61-Loss-0.7012385332086987.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-61-Loss-0.7012385332086987.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-61-Loss-0.7012385332086987.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-61-Loss-0.7012385332086987.pdopt -INFO:local_logger:Now training epoch 62. LR=0.000151 -INFO:master_logger:Now training epoch 62. 
LR=0.000151 -INFO:local_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.6976 -INFO:local_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.7062 -INFO:master_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.7003 -INFO:local_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.6922 -INFO:local_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.7018 -INFO:local_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.7010 -INFO:local_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.7098 -INFO:local_logger:Epoch[062/800], Step[0000/0626], Avg Loss: 0.7053 -INFO:local_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.7008 -INFO:local_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.7009 -INFO:local_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.7013 -INFO:local_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.7006 -INFO:local_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.7017 -INFO:local_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.6986 -INFO:master_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.7006 -INFO:local_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.7006 -INFO:local_logger:Epoch[062/800], Step[0100/0626], Avg Loss: 0.7000 -INFO:local_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.7008 -INFO:local_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.7005 -INFO:local_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.7011 -INFO:local_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.6999 -INFO:local_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.6996 -INFO:local_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.7010 -INFO:local_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.7009 -INFO:master_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.7006 -INFO:local_logger:Epoch[062/800], Step[0200/0626], Avg Loss: 0.7012 -INFO:local_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.6999 -INFO:local_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.7012 -INFO:local_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.7001 -INFO:local_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.7006 -INFO:local_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.7012 -INFO:local_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.7011 -INFO:master_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.7007 -INFO:local_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.7008 -INFO:local_logger:Epoch[062/800], Step[0300/0626], Avg Loss: 0.7007 -INFO:local_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7004 -INFO:local_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7012 -INFO:local_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7005 -INFO:local_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7000 -INFO:master_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7006 -INFO:local_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7005 -INFO:local_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7008 -INFO:local_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7005 -INFO:local_logger:Epoch[062/800], Step[0400/0626], Avg Loss: 0.7011 -INFO:local_logger:Epoch[062/800], Step[0500/0626], Avg Loss: 0.7002 -INFO:local_logger:Epoch[062/800], Step[0500/0626], Avg Loss: 0.7014 -INFO:local_logger:Epoch[062/800], Step[0500/0626], Avg Loss: 0.7009 -INFO:local_logger:Epoch[062/800], Step[0500/0626], Avg Loss: 0.7009 -INFO:local_logger:Epoch[062/800], Step[0500/0626], Avg Loss: 0.7005 -INFO:local_logger:Epoch[062/800], Step[0500/0626], Avg Loss: 0.7006 
[... deleted training-log output removed by this patch: INFO lines from local_logger and master_logger for the MAE training run ./output/train-20211219-17-07-40, covering epochs 062–074 of 800 (626 steps per epoch, 8 worker processes). The averaged train loss decreases from ~0.700 at epoch 062 to ~0.693 at epoch 073, with LR ≈ 0.000151–0.000152 and ~860–886 s per epoch; model/optimizer checkpoints (MAE-Epoch-NN-Loss-*.pdparams / .pdopt) are saved after every epoch ...]
-INFO:local_logger:Epoch[074/800], Step[0500/0626], Avg Loss: 0.6920 -INFO:master_logger:Epoch[074/800], Step[0500/0626], Avg Loss: 0.6920 -INFO:local_logger:Epoch[074/800], Step[0500/0626], Avg Loss: 0.6915 -INFO:local_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6923 -INFO:local_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6919 -INFO:local_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6918 -INFO:local_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6917 -INFO:local_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6917 -INFO:local_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6921 -INFO:local_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6922 -INFO:master_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6920 -INFO:local_logger:Epoch[074/800], Step[0600/0626], Avg Loss: 0.6922 -INFO:local_logger:----- Epoch[074/800], Train Loss: 0.6922, time: 868.83 -INFO:local_logger:Now training epoch 75. LR=0.000152 -INFO:local_logger:----- Epoch[074/800], Train Loss: 0.6922, time: 869.13 -INFO:local_logger:Now training epoch 75. LR=0.000152 -INFO:local_logger:----- Epoch[074/800], Train Loss: 0.6916, time: 868.88 -INFO:local_logger:Now training epoch 75. LR=0.000152 -INFO:local_logger:----- Epoch[074/800], Train Loss: 0.6920, time: 868.87 -INFO:local_logger:Now training epoch 75. LR=0.000152 -INFO:local_logger:----- Epoch[074/800], Train Loss: 0.6917, time: 869.16 -INFO:local_logger:Now training epoch 75. LR=0.000152 -INFO:local_logger:----- Epoch[074/800], Train Loss: 0.6917, time: 868.97 -INFO:local_logger:Now training epoch 75. LR=0.000152 -INFO:local_logger:----- Epoch[074/800], Train Loss: 0.6918, time: 865.36 -INFO:master_logger:----- Epoch[074/800], Train Loss: 0.6919, time: 865.36 -INFO:local_logger:----- Epoch[074/800], Train Loss: 0.6922, time: 869.28 -INFO:local_logger:Now training epoch 75. LR=0.000152 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-74-Loss-0.6918195025700205.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-74-Loss-0.6918195025700205.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-74-Loss-0.6918195025700205.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-74-Loss-0.6918195025700205.pdopt -INFO:local_logger:Now training epoch 75. LR=0.000152 -INFO:master_logger:Now training epoch 75. 
LR=0.000152 -INFO:local_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6855 -INFO:master_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6909 -INFO:local_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6841 -INFO:local_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6874 -INFO:local_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6973 -INFO:local_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6943 -INFO:local_logger:Epoch[075/800], Step[0000/0626], Avg Loss: 0.6994 -INFO:local_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6913 -INFO:local_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6910 -INFO:local_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6919 -INFO:local_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6928 -INFO:master_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6914 -INFO:local_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[075/800], Step[0100/0626], Avg Loss: 0.6915 -INFO:local_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6921 -INFO:local_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6912 -INFO:local_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6916 -INFO:local_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6914 -INFO:local_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6921 -INFO:local_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6911 -INFO:master_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6913 -INFO:local_logger:Epoch[075/800], Step[0200/0626], Avg Loss: 0.6904 -INFO:local_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6915 -INFO:local_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6911 -INFO:local_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6916 -INFO:local_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6913 -INFO:master_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6912 -INFO:local_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6916 -INFO:local_logger:Epoch[075/800], Step[0300/0626], Avg Loss: 0.6915 -INFO:local_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6910 -INFO:local_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6917 -INFO:local_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6910 -INFO:local_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6919 -INFO:master_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6913 -INFO:local_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6915 -INFO:local_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6914 -INFO:local_logger:Epoch[075/800], Step[0400/0626], Avg Loss: 0.6913 -INFO:local_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6918 -INFO:local_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6911 -INFO:local_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6917 -INFO:local_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6911 -INFO:local_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6911 -INFO:local_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6918 
-INFO:local_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6913 -INFO:master_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6913 -INFO:local_logger:Epoch[075/800], Step[0500/0626], Avg Loss: 0.6904 -INFO:local_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6916 -INFO:local_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6910 -INFO:master_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6912 -INFO:local_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6915 -INFO:local_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6918 -INFO:local_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6912 -INFO:local_logger:Epoch[075/800], Step[0600/0626], Avg Loss: 0.6913 -INFO:local_logger:----- Epoch[075/800], Train Loss: 0.6918, time: 889.31 -INFO:local_logger:Now training epoch 76. LR=0.000152 -INFO:local_logger:----- Epoch[075/800], Train Loss: 0.6917, time: 889.53 -INFO:local_logger:Now training epoch 76. LR=0.000152 -INFO:local_logger:----- Epoch[075/800], Train Loss: 0.6907, time: 889.63 -INFO:local_logger:Now training epoch 76. LR=0.000152 -INFO:local_logger:----- Epoch[075/800], Train Loss: 0.6910, time: 885.80 -INFO:master_logger:----- Epoch[075/800], Train Loss: 0.6913, time: 885.80 -INFO:local_logger:----- Epoch[075/800], Train Loss: 0.6913, time: 889.63 -INFO:local_logger:Now training epoch 76. LR=0.000152 -INFO:local_logger:----- Epoch[075/800], Train Loss: 0.6912, time: 890.10 -INFO:local_logger:Now training epoch 76. LR=0.000152 -INFO:local_logger:----- Epoch[075/800], Train Loss: 0.6916, time: 890.11 -INFO:local_logger:Now training epoch 76. LR=0.000152 -INFO:local_logger:----- Epoch[075/800], Train Loss: 0.6911, time: 890.09 -INFO:local_logger:Now training epoch 76. LR=0.000152 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-75-Loss-0.6910110246189355.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-75-Loss-0.6910110246189355.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-75-Loss-0.6910110246189355.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-75-Loss-0.6910110246189355.pdopt -INFO:local_logger:Now training epoch 76. LR=0.000152 -INFO:master_logger:Now training epoch 76. 
LR=0.000152 -INFO:local_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6949 -INFO:local_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6849 -INFO:local_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6967 -INFO:master_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6895 -INFO:local_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6921 -INFO:local_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6918 -INFO:local_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6833 -INFO:local_logger:Epoch[076/800], Step[0000/0626], Avg Loss: 0.6889 -INFO:local_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6912 -INFO:local_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6912 -INFO:master_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0100/0626], Avg Loss: 0.6922 -INFO:local_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6914 -INFO:local_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6904 -INFO:local_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6911 -INFO:local_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6909 -INFO:local_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6909 -INFO:local_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6905 -INFO:master_logger:Epoch[076/800], Step[0200/0626], Avg Loss: 0.6909 -INFO:local_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6911 -INFO:local_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6903 -INFO:local_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6903 -INFO:local_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6906 -INFO:master_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[076/800], Step[0300/0626], Avg Loss: 0.6910 -INFO:local_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6901 -INFO:local_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6908 -INFO:master_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[076/800], Step[0400/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6911 
-INFO:local_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6903 -INFO:master_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0500/0626], Avg Loss: 0.6910 -INFO:local_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6904 -INFO:local_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6902 -INFO:master_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6912 -INFO:local_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6910 -INFO:local_logger:Epoch[076/800], Step[0600/0626], Avg Loss: 0.6903 -INFO:local_logger:----- Epoch[076/800], Train Loss: 0.6911, time: 859.45 -INFO:local_logger:Now training epoch 77. LR=0.000152 -INFO:local_logger:----- Epoch[076/800], Train Loss: 0.6906, time: 855.76 -INFO:master_logger:----- Epoch[076/800], Train Loss: 0.6906, time: 855.76 -INFO:local_logger:----- Epoch[076/800], Train Loss: 0.6910, time: 859.92 -INFO:local_logger:Now training epoch 77. LR=0.000152 -INFO:local_logger:----- Epoch[076/800], Train Loss: 0.6907, time: 859.58 -INFO:local_logger:Now training epoch 77. LR=0.000152 -INFO:local_logger:----- Epoch[076/800], Train Loss: 0.6906, time: 859.59 -INFO:local_logger:Now training epoch 77. LR=0.000152 -INFO:local_logger:----- Epoch[076/800], Train Loss: 0.6903, time: 860.18 -INFO:local_logger:Now training epoch 77. LR=0.000152 -INFO:local_logger:----- Epoch[076/800], Train Loss: 0.6903, time: 860.24 -INFO:local_logger:Now training epoch 77. LR=0.000152 -INFO:local_logger:----- Epoch[076/800], Train Loss: 0.6904, time: 859.60 -INFO:local_logger:Now training epoch 77. LR=0.000152 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-76-Loss-0.6905609986769955.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-76-Loss-0.6905609986769955.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-76-Loss-0.6905609986769955.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-76-Loss-0.6905609986769955.pdopt -INFO:local_logger:Now training epoch 77. LR=0.000152 -INFO:master_logger:Now training epoch 77. 
LR=0.000152 -INFO:local_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.6953 -INFO:local_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.6740 -INFO:local_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.6827 -INFO:master_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.7074 -INFO:local_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.7042 -INFO:local_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.6947 -INFO:local_logger:Epoch[077/800], Step[0000/0626], Avg Loss: 0.6790 -INFO:local_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6899 -INFO:local_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6911 -INFO:local_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6909 -INFO:local_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6900 -INFO:master_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[077/800], Step[0100/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6901 -INFO:local_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6903 -INFO:local_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6907 -INFO:local_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6892 -INFO:master_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[077/800], Step[0200/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6904 -INFO:local_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6908 -INFO:master_logger:Epoch[077/800], Step[0300/0626], Avg Loss: 0.6901 -INFO:local_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6903 -INFO:local_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6902 -INFO:master_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[077/800], Step[0400/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6903 -INFO:local_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6899 
-INFO:local_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6896 -INFO:master_logger:Epoch[077/800], Step[0500/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6906 -INFO:local_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6899 -INFO:local_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6898 -INFO:master_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[077/800], Step[0600/0626], Avg Loss: 0.6904 -INFO:local_logger:----- Epoch[077/800], Train Loss: 0.6899, time: 882.70 -INFO:local_logger:Now training epoch 78. LR=0.000152 -INFO:local_logger:----- Epoch[077/800], Train Loss: 0.6898, time: 884.37 -INFO:local_logger:Now training epoch 78. LR=0.000152 -INFO:local_logger:----- Epoch[077/800], Train Loss: 0.6901, time: 883.82 -INFO:local_logger:Now training epoch 78. LR=0.000152 -INFO:local_logger:----- Epoch[077/800], Train Loss: 0.6906, time: 883.80 -INFO:local_logger:----- Epoch[077/800], Train Loss: 0.6902, time: 883.80 -INFO:local_logger:Now training epoch 78. LR=0.000152 -INFO:local_logger:Now training epoch 78. LR=0.000152 -INFO:local_logger:----- Epoch[077/800], Train Loss: 0.6905, time: 883.93 -INFO:local_logger:Now training epoch 78. LR=0.000152 -INFO:local_logger:----- Epoch[077/800], Train Loss: 0.6898, time: 880.45 -INFO:master_logger:----- Epoch[077/800], Train Loss: 0.6902, time: 880.45 -INFO:local_logger:----- Epoch[077/800], Train Loss: 0.6905, time: 883.84 -INFO:local_logger:Now training epoch 78. LR=0.000152 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-77-Loss-0.6897522572932259.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-77-Loss-0.6897522572932259.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-77-Loss-0.6897522572932259.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-77-Loss-0.6897522572932259.pdopt -INFO:local_logger:Now training epoch 78. LR=0.000152 -INFO:master_logger:Now training epoch 78. 
LR=0.000152 -INFO:local_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6849 -INFO:local_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6877 -INFO:master_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6933 -INFO:local_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6979 -INFO:local_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6939 -INFO:local_logger:Epoch[078/800], Step[0000/0626], Avg Loss: 0.6743 -INFO:local_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6903 -INFO:local_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6884 -INFO:local_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6884 -INFO:local_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6902 -INFO:master_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0100/0626], Avg Loss: 0.6903 -INFO:local_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6892 -INFO:local_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6895 -INFO:master_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6899 -INFO:local_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[078/800], Step[0200/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6896 -INFO:master_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6895 -INFO:local_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6895 -INFO:local_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6903 -INFO:local_logger:Epoch[078/800], Step[0300/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6901 -INFO:master_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6894 -INFO:local_logger:Epoch[078/800], Step[0400/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6894 -INFO:local_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6898 
-INFO:master_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0500/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6895 -INFO:local_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6895 -INFO:local_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6895 -INFO:local_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6897 -INFO:master_logger:Epoch[078/800], Step[0600/0626], Avg Loss: 0.6896 -INFO:local_logger:----- Epoch[078/800], Train Loss: 0.6895, time: 850.97 -INFO:local_logger:Now training epoch 79. LR=0.000152 -INFO:local_logger:----- Epoch[078/800], Train Loss: 0.6897, time: 849.85 -INFO:local_logger:Now training epoch 79. LR=0.000152 -INFO:local_logger:----- Epoch[078/800], Train Loss: 0.6897, time: 850.45 -INFO:local_logger:Now training epoch 79. LR=0.000152 -INFO:local_logger:----- Epoch[078/800], Train Loss: 0.6894, time: 850.45 -INFO:local_logger:Now training epoch 79. LR=0.000152 -INFO:local_logger:----- Epoch[078/800], Train Loss: 0.6895, time: 846.72 -INFO:master_logger:----- Epoch[078/800], Train Loss: 0.6896, time: 846.72 -INFO:local_logger:----- Epoch[078/800], Train Loss: 0.6897, time: 850.44 -INFO:local_logger:Now training epoch 79. LR=0.000152 -INFO:local_logger:----- Epoch[078/800], Train Loss: 0.6894, time: 850.46 -INFO:local_logger:Now training epoch 79. LR=0.000152 -INFO:local_logger:----- Epoch[078/800], Train Loss: 0.6898, time: 850.47 -INFO:local_logger:Now training epoch 79. LR=0.000152 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-78-Loss-0.6895287958086865.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-78-Loss-0.6895287958086865.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-78-Loss-0.6895287958086865.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-78-Loss-0.6895287958086865.pdopt -INFO:local_logger:Now training epoch 79. LR=0.000152 -INFO:master_logger:Now training epoch 79. 
LR=0.000152 -INFO:local_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.6853 -INFO:local_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.6870 -INFO:master_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.7025 -INFO:local_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.6862 -INFO:local_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.6839 -INFO:local_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.6843 -INFO:local_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.6939 -INFO:local_logger:Epoch[079/800], Step[0000/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6901 -INFO:local_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6892 -INFO:master_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6902 -INFO:local_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[079/800], Step[0100/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6892 -INFO:local_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6900 -INFO:master_logger:Epoch[079/800], Step[0200/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6892 -INFO:local_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6894 -INFO:master_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6894 -INFO:local_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6889 -INFO:local_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[079/800], Step[0300/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6894 -INFO:local_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6891 -INFO:master_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6892 -INFO:local_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[079/800], Step[0400/0626], Avg Loss: 0.6894 -INFO:local_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6892 -INFO:local_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6895 -INFO:local_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6892 
-INFO:local_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6895 -INFO:local_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6888 -INFO:master_logger:Epoch[079/800], Step[0500/0626], Avg Loss: 0.6892 -INFO:local_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6894 -INFO:local_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6892 -INFO:master_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6892 -INFO:local_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6897 -INFO:local_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6889 -INFO:local_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6894 -INFO:local_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6896 -INFO:local_logger:Epoch[079/800], Step[0600/0626], Avg Loss: 0.6891 -INFO:local_logger:----- Epoch[079/800], Train Loss: 0.6892, time: 888.30 -INFO:local_logger:Now training epoch 80. LR=0.000152 -INFO:local_logger:----- Epoch[079/800], Train Loss: 0.6889, time: 888.13 -INFO:local_logger:----- Epoch[079/800], Train Loss: 0.6891, time: 888.74 -INFO:local_logger:Now training epoch 80. LR=0.000152 -INFO:local_logger:Now training epoch 80. LR=0.000152 -INFO:local_logger:----- Epoch[079/800], Train Loss: 0.6896, time: 888.14 -INFO:local_logger:Now training epoch 80. LR=0.000152 -INFO:local_logger:----- Epoch[079/800], Train Loss: 0.6886, time: 884.16 -INFO:master_logger:----- Epoch[079/800], Train Loss: 0.6892, time: 884.16 -INFO:local_logger:----- Epoch[079/800], Train Loss: 0.6893, time: 888.24 -INFO:local_logger:Now training epoch 80. LR=0.000152 -INFO:local_logger:----- Epoch[079/800], Train Loss: 0.6896, time: 888.24 -INFO:local_logger:Now training epoch 80. LR=0.000152 -INFO:local_logger:----- Epoch[079/800], Train Loss: 0.6893, time: 888.26 -INFO:local_logger:Now training epoch 80. LR=0.000152 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-79-Loss-0.6886302635034396.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-79-Loss-0.6886302635034396.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-79-Loss-0.6886302635034396.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-79-Loss-0.6886302635034396.pdopt -INFO:local_logger:Now training epoch 80. LR=0.000152 -INFO:master_logger:Now training epoch 80. 
LR=0.000152 -INFO:local_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.6859 -INFO:master_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.6838 -INFO:local_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.6866 -INFO:local_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.7038 -INFO:local_logger:Epoch[080/800], Step[0000/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6889 -INFO:local_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6877 -INFO:master_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6869 -INFO:local_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[080/800], Step[0100/0626], Avg Loss: 0.6898 -INFO:local_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6886 -INFO:master_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6893 -INFO:local_logger:Epoch[080/800], Step[0200/0626], Avg Loss: 0.6880 -INFO:local_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6889 -INFO:local_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6887 -INFO:master_logger:Epoch[080/800], Step[0300/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6886 -INFO:master_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[080/800], Step[0400/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6891 -INFO:local_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6891 
-INFO:local_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6888 -INFO:master_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6889 -INFO:local_logger:Epoch[080/800], Step[0500/0626], Avg Loss: 0.6889 -INFO:local_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6888 -INFO:master_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6889 -INFO:local_logger:Epoch[080/800], Step[0600/0626], Avg Loss: 0.6890 -INFO:local_logger:----- Epoch[080/800], Train Loss: 0.6890, time: 849.18 -INFO:local_logger:Now training epoch 81. LR=0.000153 -INFO:local_logger:----- Epoch[080/800], Train Loss: 0.6888, time: 849.16 -INFO:local_logger:Now training epoch 81. LR=0.000153 -INFO:local_logger:----- Epoch[080/800], Train Loss: 0.6890, time: 849.36 -INFO:local_logger:Now training epoch 81. LR=0.000153 -INFO:local_logger:----- Epoch[080/800], Train Loss: 0.6887, time: 849.41 -INFO:local_logger:Now training epoch 81. LR=0.000153 -INFO:local_logger:----- Epoch[080/800], Train Loss: 0.6888, time: 849.87 -INFO:local_logger:Now training epoch 81. LR=0.000153 -INFO:local_logger:----- Epoch[080/800], Train Loss: 0.6889, time: 849.31 -INFO:local_logger:Now training epoch 81. LR=0.000153 -INFO:local_logger:----- Epoch[080/800], Train Loss: 0.6888, time: 845.47 -INFO:master_logger:----- Epoch[080/800], Train Loss: 0.6889, time: 845.47 -INFO:local_logger:----- Epoch[080/800], Train Loss: 0.6891, time: 849.50 -INFO:local_logger:Now training epoch 81. LR=0.000153 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-80-Loss-0.6887507163269282.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-80-Loss-0.6887507163269282.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-80-Loss-0.6887507163269282.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-80-Loss-0.6887507163269282.pdopt -INFO:local_logger:Now training epoch 81. LR=0.000153 -INFO:master_logger:Now training epoch 81. 
LR=0.000153 -INFO:local_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.6842 -INFO:local_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.6873 -INFO:local_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.6999 -INFO:master_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.6908 -INFO:local_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.6984 -INFO:local_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.7003 -INFO:local_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[081/800], Step[0000/0626], Avg Loss: 0.6847 -INFO:local_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6876 -INFO:local_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6890 -INFO:local_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6882 -INFO:master_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[081/800], Step[0100/0626], Avg Loss: 0.6874 -INFO:local_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6876 -INFO:local_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6892 -INFO:local_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6886 -INFO:master_logger:Epoch[081/800], Step[0200/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6881 -INFO:master_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6887 -INFO:local_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6876 -INFO:local_logger:Epoch[081/800], Step[0300/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6879 -INFO:local_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6879 -INFO:local_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6880 -INFO:local_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6886 -INFO:master_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[081/800], Step[0400/0626], Avg Loss: 0.6888 -INFO:local_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6884 -INFO:local_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6879 -INFO:local_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6878 
-INFO:master_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[081/800], Step[0500/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6880 -INFO:local_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6880 -INFO:local_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6886 -INFO:master_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[081/800], Step[0600/0626], Avg Loss: 0.6885 -INFO:local_logger:----- Epoch[081/800], Train Loss: 0.6878, time: 886.96 -INFO:local_logger:Now training epoch 82. LR=0.000153 -INFO:local_logger:----- Epoch[081/800], Train Loss: 0.6882, time: 887.25 -INFO:local_logger:Now training epoch 82. LR=0.000153 -INFO:local_logger:----- Epoch[081/800], Train Loss: 0.6882, time: 887.32 -INFO:local_logger:Now training epoch 82. LR=0.000153 -INFO:local_logger:----- Epoch[081/800], Train Loss: 0.6883, time: 887.55 -INFO:local_logger:Now training epoch 82. LR=0.000153 -INFO:local_logger:----- Epoch[081/800], Train Loss: 0.6878, time: 887.45 -INFO:local_logger:Now training epoch 82. LR=0.000153 -INFO:local_logger:----- Epoch[081/800], Train Loss: 0.6883, time: 887.60 -INFO:local_logger:Now training epoch 82. LR=0.000153 -INFO:local_logger:----- Epoch[081/800], Train Loss: 0.6880, time: 887.62 -INFO:local_logger:Now training epoch 82. LR=0.000153 -INFO:local_logger:----- Epoch[081/800], Train Loss: 0.6885, time: 883.70 -INFO:master_logger:----- Epoch[081/800], Train Loss: 0.6882, time: 883.70 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-81-Loss-0.6885438528329545.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-81-Loss-0.6885438528329545.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-81-Loss-0.6885438528329545.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-81-Loss-0.6885438528329545.pdopt -INFO:local_logger:Now training epoch 82. LR=0.000153 -INFO:master_logger:Now training epoch 82. 
LR=0.000153 -INFO:local_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.6723 -INFO:local_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.6813 -INFO:master_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.6842 -INFO:local_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.6948 -INFO:local_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.6790 -INFO:local_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.6783 -INFO:local_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.7052 -INFO:local_logger:Epoch[082/800], Step[0000/0626], Avg Loss: 0.6743 -INFO:local_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6875 -INFO:local_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6874 -INFO:master_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6864 -INFO:local_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6886 -INFO:local_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6871 -INFO:local_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[082/800], Step[0100/0626], Avg Loss: 0.6882 -INFO:local_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6880 -INFO:local_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6875 -INFO:master_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6879 -INFO:local_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[082/800], Step[0200/0626], Avg Loss: 0.6884 -INFO:local_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6874 -INFO:local_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6873 -INFO:local_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6882 -INFO:master_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6876 -INFO:local_logger:Epoch[082/800], Step[0300/0626], Avg Loss: 0.6883 -INFO:local_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6880 -INFO:local_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6879 -INFO:local_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6871 -INFO:local_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6876 -INFO:local_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6879 -INFO:master_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6877 -INFO:local_logger:Epoch[082/800], Step[0400/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[082/800], Step[0500/0626], Avg Loss: 0.6879 -INFO:local_logger:Epoch[082/800], Step[0500/0626], Avg Loss: 0.6872 -INFO:local_logger:Epoch[082/800], Step[0500/0626], Avg Loss: 0.6878 -INFO:local_logger:Epoch[082/800], Step[0500/0626], Avg Loss: 0.6881 -INFO:local_logger:Epoch[082/800], Step[0500/0626], Avg Loss: 0.6874 -INFO:master_logger:Epoch[082/800], Step[0500/0626], Avg Loss: 0.6878 
[... removal of image_classification/MAE/nohup.out continues: repetitive pretraining console logs for epochs 082–094 of 800 (local/master logger Avg Loss decreasing from ~0.688 to ~0.683, LR 0.000153–0.000154, ~850–895 s per epoch, per-epoch checkpoints saved under ./output/train-20211219-17-07-40/) ...]
-INFO:local_logger:Epoch[094/800], Step[0500/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[094/800], Step[0500/0626], Avg Loss: 0.6832 -INFO:master_logger:Epoch[094/800], Step[0500/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6835 -INFO:local_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6834 -INFO:local_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6840 -INFO:local_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6828 -INFO:master_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[094/800], Step[0600/0626], Avg Loss: 0.6825 -INFO:local_logger:----- Epoch[094/800], Train Loss: 0.6835, time: 860.64 -INFO:local_logger:Now training epoch 95. LR=0.000155 -INFO:local_logger:----- Epoch[094/800], Train Loss: 0.6829, time: 861.10 -INFO:local_logger:Now training epoch 95. LR=0.000155 -INFO:local_logger:----- Epoch[094/800], Train Loss: 0.6834, time: 861.04 -INFO:local_logger:Now training epoch 95. LR=0.000155 -INFO:local_logger:----- Epoch[094/800], Train Loss: 0.6827, time: 861.88 -INFO:local_logger:Now training epoch 95. LR=0.000155 -INFO:local_logger:----- Epoch[094/800], Train Loss: 0.6838, time: 861.14 -INFO:local_logger:Now training epoch 95. LR=0.000155 -INFO:local_logger:----- Epoch[094/800], Train Loss: 0.6830, time: 857.69 -INFO:master_logger:----- Epoch[094/800], Train Loss: 0.6831, time: 857.69 -INFO:local_logger:----- Epoch[094/800], Train Loss: 0.6825, time: 861.06 -INFO:local_logger:Now training epoch 95. LR=0.000155 -INFO:local_logger:----- Epoch[094/800], Train Loss: 0.6830, time: 861.17 -INFO:local_logger:Now training epoch 95. LR=0.000155 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-94-Loss-0.6830039001247509.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-94-Loss-0.6830039001247509.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-94-Loss-0.6830039001247509.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-94-Loss-0.6830039001247509.pdopt -INFO:local_logger:Now training epoch 95. LR=0.000155 -INFO:master_logger:Now training epoch 95. 
LR=0.000155 -INFO:local_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6832 -INFO:master_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6771 -INFO:local_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6843 -INFO:local_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6798 -INFO:local_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6706 -INFO:local_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6905 -INFO:local_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6843 -INFO:local_logger:Epoch[095/800], Step[0000/0626], Avg Loss: 0.6855 -INFO:local_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6835 -INFO:local_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6826 -INFO:master_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[095/800], Step[0100/0626], Avg Loss: 0.6835 -INFO:local_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6833 -INFO:local_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6836 -INFO:local_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6829 -INFO:master_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0200/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6833 -INFO:local_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6820 -INFO:master_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0300/0626], Avg Loss: 0.6835 -INFO:local_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6832 -INFO:local_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6832 -INFO:local_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6826 -INFO:master_logger:Epoch[095/800], Step[0400/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6832 -INFO:local_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6828 -INFO:master_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6827 
-INFO:local_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[095/800], Step[0500/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6825 -INFO:master_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[095/800], Step[0600/0626], Avg Loss: 0.6826 -INFO:local_logger:----- Epoch[095/800], Train Loss: 0.6831, time: 887.46 -INFO:local_logger:Now training epoch 96. LR=0.000155 -INFO:local_logger:----- Epoch[095/800], Train Loss: 0.6830, time: 886.49 -INFO:local_logger:Now training epoch 96. LR=0.000155 -INFO:local_logger:----- Epoch[095/800], Train Loss: 0.6825, time: 886.66 -INFO:local_logger:Now training epoch 96. LR=0.000155 -INFO:local_logger:----- Epoch[095/800], Train Loss: 0.6825, time: 886.64 -INFO:local_logger:Now training epoch 96. LR=0.000155 -INFO:local_logger:----- Epoch[095/800], Train Loss: 0.6829, time: 886.77 -INFO:local_logger:Now training epoch 96. LR=0.000155 -INFO:local_logger:----- Epoch[095/800], Train Loss: 0.6826, time: 886.77 -INFO:local_logger:Now training epoch 96. LR=0.000155 -INFO:local_logger:----- Epoch[095/800], Train Loss: 0.6826, time: 883.07 -INFO:master_logger:----- Epoch[095/800], Train Loss: 0.6827, time: 883.07 -INFO:local_logger:----- Epoch[095/800], Train Loss: 0.6827, time: 886.80 -INFO:local_logger:Now training epoch 96. LR=0.000155 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-95-Loss-0.6825694100624208.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-95-Loss-0.6825694100624208.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-95-Loss-0.6825694100624208.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-95-Loss-0.6825694100624208.pdopt -INFO:local_logger:Now training epoch 96. LR=0.000155 -INFO:master_logger:Now training epoch 96. 
LR=0.000155 -INFO:local_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6925 -INFO:local_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6757 -INFO:local_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6684 -INFO:master_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6799 -INFO:local_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6804 -INFO:local_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6683 -INFO:local_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6801 -INFO:local_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6900 -INFO:local_logger:Epoch[096/800], Step[0000/0626], Avg Loss: 0.6837 -INFO:local_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6836 -INFO:master_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6841 -INFO:local_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[096/800], Step[0100/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6825 -INFO:master_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[096/800], Step[0200/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6819 -INFO:master_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[096/800], Step[0300/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6822 -INFO:master_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[096/800], Step[0400/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6821 
-INFO:master_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[096/800], Step[0500/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6827 -INFO:master_logger:Epoch[096/800], Step[0600/0626], Avg Loss: 0.6824 -INFO:local_logger:----- Epoch[096/800], Train Loss: 0.6821, time: 868.69 -INFO:local_logger:Now training epoch 97. LR=0.000155 -INFO:local_logger:----- Epoch[096/800], Train Loss: 0.6821, time: 869.09 -INFO:local_logger:Now training epoch 97. LR=0.000155 -INFO:local_logger:----- Epoch[096/800], Train Loss: 0.6823, time: 868.99 -INFO:local_logger:Now training epoch 97. LR=0.000155 -INFO:local_logger:----- Epoch[096/800], Train Loss: 0.6826, time: 868.81 -INFO:local_logger:Now training epoch 97. LR=0.000155 -INFO:local_logger:----- Epoch[096/800], Train Loss: 0.6824, time: 868.69 -INFO:local_logger:----- Epoch[096/800], Train Loss: 0.6827, time: 864.96 -INFO:local_logger:Now training epoch 97. LR=0.000155 -INFO:master_logger:----- Epoch[096/800], Train Loss: 0.6824, time: 864.96 -INFO:local_logger:----- Epoch[096/800], Train Loss: 0.6828, time: 868.82 -INFO:local_logger:Now training epoch 97. LR=0.000155 -INFO:local_logger:----- Epoch[096/800], Train Loss: 0.6826, time: 868.66 -INFO:local_logger:Now training epoch 97. LR=0.000155 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-96-Loss-0.6826821214191926.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-96-Loss-0.6826821214191926.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-96-Loss-0.6826821214191926.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-96-Loss-0.6826821214191926.pdopt -INFO:local_logger:Now training epoch 97. LR=0.000155 -INFO:master_logger:Now training epoch 97. 
LR=0.000155 -INFO:local_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6880 -INFO:master_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6850 -INFO:local_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6940 -INFO:local_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6844 -INFO:local_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6755 -INFO:local_logger:Epoch[097/800], Step[0000/0626], Avg Loss: 0.6928 -INFO:local_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6822 -INFO:master_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6831 -INFO:local_logger:Epoch[097/800], Step[0100/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6823 -INFO:master_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[097/800], Step[0200/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6822 -INFO:master_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[097/800], Step[0300/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6818 -INFO:master_logger:Epoch[097/800], Step[0400/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6826 -INFO:master_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6826 
-INFO:local_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[097/800], Step[0500/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6825 -INFO:master_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[097/800], Step[0600/0626], Avg Loss: 0.6814 -INFO:local_logger:----- Epoch[097/800], Train Loss: 0.6822, time: 881.18 -INFO:local_logger:Now training epoch 98. LR=0.000155 -INFO:local_logger:----- Epoch[097/800], Train Loss: 0.6818, time: 882.30 -INFO:local_logger:Now training epoch 98. LR=0.000155 -INFO:local_logger:----- Epoch[097/800], Train Loss: 0.6822, time: 882.31 -INFO:local_logger:Now training epoch 98. LR=0.000155 -INFO:local_logger:----- Epoch[097/800], Train Loss: 0.6826, time: 882.32 -INFO:local_logger:Now training epoch 98. LR=0.000155 -INFO:local_logger:----- Epoch[097/800], Train Loss: 0.6813, time: 882.38 -INFO:local_logger:Now training epoch 98. LR=0.000155 -INFO:local_logger:----- Epoch[097/800], Train Loss: 0.6820, time: 882.40 -INFO:local_logger:Now training epoch 98. LR=0.000155 -INFO:local_logger:----- Epoch[097/800], Train Loss: 0.6818, time: 878.64 -INFO:master_logger:----- Epoch[097/800], Train Loss: 0.6820, time: 878.64 -INFO:local_logger:----- Epoch[097/800], Train Loss: 0.6823, time: 882.40 -INFO:local_logger:Now training epoch 98. LR=0.000155 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-97-Loss-0.6818455600972856.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-97-Loss-0.6818455600972856.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-97-Loss-0.6818455600972856.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-97-Loss-0.6818455600972856.pdopt -INFO:local_logger:Now training epoch 98. LR=0.000155 -INFO:master_logger:Now training epoch 98. 
LR=0.000155 -INFO:local_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.6930 -INFO:local_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.6821 -INFO:master_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.6860 -INFO:local_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.6802 -INFO:local_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.6848 -INFO:local_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.6756 -INFO:local_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.7011 -INFO:local_logger:Epoch[098/800], Step[0000/0626], Avg Loss: 0.6899 -INFO:local_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6824 -INFO:master_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[098/800], Step[0100/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6826 -INFO:master_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[098/800], Step[0200/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6822 -INFO:master_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[098/800], Step[0300/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6827 -INFO:local_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6817 -INFO:master_logger:Epoch[098/800], Step[0400/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6823 
-INFO:master_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[098/800], Step[0500/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6816 -INFO:master_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[098/800], Step[0600/0626], Avg Loss: 0.6820 -INFO:local_logger:----- Epoch[098/800], Train Loss: 0.6819, time: 870.20 -INFO:master_logger:----- Epoch[098/800], Train Loss: 0.6820, time: 870.20 -INFO:local_logger:----- Epoch[098/800], Train Loss: 0.6823, time: 875.16 -INFO:local_logger:Now training epoch 99. LR=0.000155 -INFO:local_logger:----- Epoch[098/800], Train Loss: 0.6820, time: 874.01 -INFO:local_logger:Now training epoch 99. LR=0.000155 -INFO:local_logger:----- Epoch[098/800], Train Loss: 0.6825, time: 874.00 -INFO:local_logger:Now training epoch 99. LR=0.000155 -INFO:local_logger:----- Epoch[098/800], Train Loss: 0.6817, time: 874.10 -INFO:local_logger:Now training epoch 99. LR=0.000155 -INFO:local_logger:----- Epoch[098/800], Train Loss: 0.6822, time: 874.02 -INFO:local_logger:Now training epoch 99. LR=0.000155 -INFO:local_logger:----- Epoch[098/800], Train Loss: 0.6815, time: 874.10 -INFO:local_logger:Now training epoch 99. LR=0.000155 -INFO:local_logger:----- Epoch[098/800], Train Loss: 0.6816, time: 874.02 -INFO:local_logger:Now training epoch 99. LR=0.000155 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-98-Loss-0.681889634827903.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-98-Loss-0.681889634827903.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-98-Loss-0.681889634827903.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-98-Loss-0.681889634827903.pdopt -INFO:local_logger:Now training epoch 99. LR=0.000155 -INFO:master_logger:Now training epoch 99. 
LR=0.000155 -INFO:local_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6968 -INFO:local_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6870 -INFO:local_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6865 -INFO:master_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6853 -INFO:local_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6660 -INFO:local_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6719 -INFO:local_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6836 -INFO:local_logger:Epoch[099/800], Step[0000/0626], Avg Loss: 0.6839 -INFO:local_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6834 -INFO:local_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6826 -INFO:master_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[099/800], Step[0100/0626], Avg Loss: 0.6826 -INFO:local_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6828 -INFO:local_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6822 -INFO:master_logger:Epoch[099/800], Step[0200/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6811 -INFO:master_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[099/800], Step[0300/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6825 -INFO:local_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6816 -INFO:master_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[099/800], Step[0400/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6815 
-INFO:master_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[099/800], Step[0500/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6817 -INFO:master_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[099/800], Step[0600/0626], Avg Loss: 0.6817 -INFO:local_logger:----- Epoch[099/800], Train Loss: 0.6811, time: 873.44 -INFO:local_logger:Now training epoch 100. LR=0.000155 -INFO:local_logger:----- Epoch[099/800], Train Loss: 0.6817, time: 874.12 -INFO:local_logger:Now training epoch 100. LR=0.000155 -INFO:local_logger:----- Epoch[099/800], Train Loss: 0.6814, time: 874.14 -INFO:local_logger:Now training epoch 100. LR=0.000155 -INFO:local_logger:----- Epoch[099/800], Train Loss: 0.6815, time: 874.31 -INFO:local_logger:Now training epoch 100. LR=0.000155 -INFO:local_logger:----- Epoch[099/800], Train Loss: 0.6816, time: 874.69 -INFO:local_logger:Now training epoch 100. LR=0.000155 -INFO:local_logger:----- Epoch[099/800], Train Loss: 0.6820, time: 874.75 -INFO:local_logger:Now training epoch 100. LR=0.000155 -INFO:local_logger:----- Epoch[099/800], Train Loss: 0.6814, time: 871.01 -INFO:master_logger:----- Epoch[099/800], Train Loss: 0.6816, time: 871.01 -INFO:local_logger:----- Epoch[099/800], Train Loss: 0.6820, time: 874.69 -INFO:local_logger:Now training epoch 100. LR=0.000155 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-99-Loss-0.6813920508235197.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-99-Loss-0.6813920508235197.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-99-Loss-0.6813920508235197.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-99-Loss-0.6813920508235197.pdopt -INFO:local_logger:Now training epoch 100. LR=0.000155 -INFO:master_logger:Now training epoch 100. 
LR=0.000155 -INFO:local_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6874 -INFO:local_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6858 -INFO:local_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6763 -INFO:master_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6794 -INFO:local_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6727 -INFO:local_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6920 -INFO:local_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6764 -INFO:local_logger:Epoch[100/800], Step[0000/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6824 -INFO:local_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6801 -INFO:local_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6821 -INFO:master_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[100/800], Step[0100/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6804 -INFO:local_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6812 -INFO:master_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[100/800], Step[0200/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6812 -INFO:master_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[100/800], Step[0300/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6815 -INFO:master_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[100/800], Step[0400/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6812 -INFO:master_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6814 
-INFO:local_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[100/800], Step[0500/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6814 -INFO:master_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[100/800], Step[0600/0626], Avg Loss: 0.6814 -INFO:local_logger:----- Epoch[100/800], Train Loss: 0.6815, time: 871.01 -INFO:local_logger:Now training epoch 101. LR=0.000156 -INFO:local_logger:----- Epoch[100/800], Train Loss: 0.6814, time: 870.82 -INFO:local_logger:Now training epoch 101. LR=0.000156 -INFO:local_logger:----- Epoch[100/800], Train Loss: 0.6814, time: 871.44 -INFO:local_logger:Now training epoch 101. LR=0.000156 -INFO:local_logger:----- Epoch[100/800], Train Loss: 0.6812, time: 871.50 -INFO:local_logger:Now training epoch 101. LR=0.000156 -INFO:local_logger:----- Epoch[100/800], Train Loss: 0.6812, time: 867.05 -INFO:master_logger:----- Epoch[100/800], Train Loss: 0.6813, time: 867.05 -INFO:local_logger:----- Epoch[100/800], Train Loss: 0.6811, time: 872.20 -INFO:local_logger:Now training epoch 101. LR=0.000156 -INFO:local_logger:----- Epoch[100/800], Train Loss: 0.6814, time: 870.95 -INFO:local_logger:Now training epoch 101. LR=0.000156 -INFO:local_logger:----- Epoch[100/800], Train Loss: 0.6814, time: 871.00 -INFO:local_logger:Now training epoch 101. LR=0.000156 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-100-Loss-0.6812047341083004.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-100-Loss-0.6812047341083004.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-100-Loss-0.6812047341083004.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-100-Loss-0.6812047341083004.pdopt -INFO:local_logger:Now training epoch 101. LR=0.000156 -INFO:master_logger:Now training epoch 101. 
LR=0.000156 -INFO:local_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6922 -INFO:local_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6755 -INFO:local_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6834 -INFO:master_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6772 -INFO:local_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6966 -INFO:local_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6690 -INFO:local_logger:Epoch[101/800], Step[0000/0626], Avg Loss: 0.6717 -INFO:local_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6798 -INFO:local_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6819 -INFO:master_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[101/800], Step[0100/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6813 -INFO:master_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[101/800], Step[0200/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6807 -INFO:master_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[101/800], Step[0300/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6804 -INFO:local_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6810 -INFO:master_logger:Epoch[101/800], Step[0400/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6811 
-INFO:local_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6810 -INFO:master_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[101/800], Step[0500/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6808 -INFO:master_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[101/800], Step[0600/0626], Avg Loss: 0.6809 -INFO:local_logger:----- Epoch[101/800], Train Loss: 0.6805, time: 867.69 -INFO:local_logger:Now training epoch 102. LR=0.000156 -INFO:local_logger:----- Epoch[101/800], Train Loss: 0.6806, time: 868.18 -INFO:local_logger:Now training epoch 102. LR=0.000156 -INFO:local_logger:----- Epoch[101/800], Train Loss: 0.6806, time: 868.58 -INFO:local_logger:Now training epoch 102. LR=0.000156 -INFO:local_logger:----- Epoch[101/800], Train Loss: 0.6808, time: 864.53 -INFO:master_logger:----- Epoch[101/800], Train Loss: 0.6808, time: 864.53 -INFO:local_logger:----- Epoch[101/800], Train Loss: 0.6807, time: 868.26 -INFO:local_logger:Now training epoch 102. LR=0.000156 -INFO:local_logger:----- Epoch[101/800], Train Loss: 0.6810, time: 868.29 -INFO:local_logger:Now training epoch 102. LR=0.000156 -INFO:local_logger:----- Epoch[101/800], Train Loss: 0.6811, time: 868.42 -INFO:local_logger:Now training epoch 102. LR=0.000156 -INFO:local_logger:----- Epoch[101/800], Train Loss: 0.6812, time: 868.25 -INFO:local_logger:Now training epoch 102. LR=0.000156 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-101-Loss-0.6808065795139766.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-101-Loss-0.6808065795139766.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-101-Loss-0.6808065795139766.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-101-Loss-0.6808065795139766.pdopt -INFO:local_logger:Now training epoch 102. LR=0.000156 -INFO:master_logger:Now training epoch 102. 
LR=0.000156 -INFO:local_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6861 -INFO:master_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6794 -INFO:local_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6910 -INFO:local_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6753 -INFO:local_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6669 -INFO:local_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6722 -INFO:local_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6901 -INFO:local_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6797 -INFO:local_logger:Epoch[102/800], Step[0000/0626], Avg Loss: 0.6743 -INFO:local_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6830 -INFO:local_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6822 -INFO:local_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6813 -INFO:local_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6817 -INFO:master_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[102/800], Step[0100/0626], Avg Loss: 0.6804 -INFO:local_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6807 -INFO:master_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6819 -INFO:local_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[102/800], Step[0200/0626], Avg Loss: 0.6803 -INFO:local_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6814 -INFO:master_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6821 -INFO:local_logger:Epoch[102/800], Step[0300/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6816 -INFO:local_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6804 -INFO:local_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6810 -INFO:master_logger:Epoch[102/800], Step[0400/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6807 
-INFO:local_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6820 -INFO:local_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6811 -INFO:master_logger:Epoch[102/800], Step[0500/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6817 -INFO:local_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6810 -INFO:master_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6811 -INFO:local_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[102/800], Step[0600/0626], Avg Loss: 0.6813 -INFO:local_logger:----- Epoch[102/800], Train Loss: 0.6812, time: 872.04 -INFO:local_logger:Now training epoch 103. LR=0.000156 -INFO:local_logger:----- Epoch[102/800], Train Loss: 0.6812, time: 868.78 -INFO:master_logger:----- Epoch[102/800], Train Loss: 0.6811, time: 868.78 -INFO:local_logger:----- Epoch[102/800], Train Loss: 0.6809, time: 872.56 -INFO:local_logger:Now training epoch 103. LR=0.000156 -INFO:local_logger:----- Epoch[102/800], Train Loss: 0.6805, time: 872.77 -INFO:local_logger:Now training epoch 103. LR=0.000156 -INFO:local_logger:----- Epoch[102/800], Train Loss: 0.6808, time: 873.71 -INFO:local_logger:Now training epoch 103. LR=0.000156 -INFO:local_logger:----- Epoch[102/800], Train Loss: 0.6809, time: 873.10 -INFO:local_logger:Now training epoch 103. LR=0.000156 -INFO:local_logger:----- Epoch[102/800], Train Loss: 0.6815, time: 873.19 -INFO:local_logger:Now training epoch 103. LR=0.000156 -INFO:local_logger:----- Epoch[102/800], Train Loss: 0.6815, time: 873.09 -INFO:local_logger:Now training epoch 103. LR=0.000156 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-102-Loss-0.681159841605351.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-102-Loss-0.681159841605351.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-102-Loss-0.681159841605351.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-102-Loss-0.681159841605351.pdopt -INFO:local_logger:Now training epoch 103. LR=0.000156 -INFO:master_logger:Now training epoch 103. 
LR=0.000156 -INFO:local_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6787 -INFO:master_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6646 -INFO:local_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6794 -INFO:local_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6850 -INFO:local_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6838 -INFO:local_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6909 -INFO:local_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6721 -INFO:local_logger:Epoch[103/800], Step[0000/0626], Avg Loss: 0.6917 -INFO:local_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6811 -INFO:master_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6829 -INFO:local_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6810 -INFO:local_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6795 -INFO:local_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6823 -INFO:local_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6818 -INFO:local_logger:Epoch[103/800], Step[0100/0626], Avg Loss: 0.6802 -INFO:local_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6799 -INFO:local_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6810 -INFO:master_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6815 -INFO:local_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6804 -INFO:local_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6797 -INFO:local_logger:Epoch[103/800], Step[0200/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6803 -INFO:local_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6799 -INFO:local_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6803 -INFO:local_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6801 -INFO:local_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6806 -INFO:master_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[103/800], Step[0300/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6802 -INFO:local_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6800 -INFO:local_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6805 -INFO:master_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6814 -INFO:local_logger:Epoch[103/800], Step[0400/0626], Avg Loss: 0.6803 -INFO:local_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6802 -INFO:local_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6812 -INFO:local_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6802 -INFO:local_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6807 -INFO:local_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6808 -INFO:local_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6809 
-INFO:local_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6804 -INFO:master_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[103/800], Step[0500/0626], Avg Loss: 0.6804 -INFO:local_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6806 -INFO:local_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6801 -INFO:local_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6802 -INFO:master_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6805 -INFO:local_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6802 -INFO:local_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6809 -INFO:local_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6802 -INFO:local_logger:Epoch[103/800], Step[0600/0626], Avg Loss: 0.6806 -INFO:local_logger:----- Epoch[103/800], Train Loss: 0.6801, time: 859.70 -INFO:local_logger:Now training epoch 104. LR=0.000156 -INFO:local_logger:----- Epoch[103/800], Train Loss: 0.6803, time: 859.89 -INFO:local_logger:Now training epoch 104. LR=0.000156 -INFO:local_logger:----- Epoch[103/800], Train Loss: 0.6805, time: 859.89 -INFO:local_logger:Now training epoch 104. LR=0.000156 -INFO:local_logger:----- Epoch[103/800], Train Loss: 0.6809, time: 856.80 -INFO:master_logger:----- Epoch[103/800], Train Loss: 0.6805, time: 856.80 -INFO:local_logger:----- Epoch[103/800], Train Loss: 0.6806, time: 860.28 -INFO:local_logger:Now training epoch 104. LR=0.000156 -INFO:local_logger:----- Epoch[103/800], Train Loss: 0.6801, time: 859.89 -INFO:local_logger:Now training epoch 104. LR=0.000156 -INFO:local_logger:----- Epoch[103/800], Train Loss: 0.6802, time: 859.91 -INFO:local_logger:Now training epoch 104. LR=0.000156 -INFO:local_logger:----- Epoch[103/800], Train Loss: 0.6809, time: 860.42 -INFO:local_logger:Now training epoch 104. LR=0.000156 -INFO:local_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-103-Loss-0.6808819352382769.pdparams -INFO:local_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-103-Loss-0.6808819352382769.pdopt -INFO:master_logger:----- Save model: ./output/train-20211219-17-07-40/MAE-Epoch-103-Loss-0.6808819352382769.pdparams -INFO:master_logger:----- Save optim: ./output/train-20211219-17-07-40/MAE-Epoch-103-Loss-0.6808819352382769.pdopt -INFO:local_logger:Now training epoch 104. LR=0.000156 -INFO:master_logger:Now training epoch 104. LR=0.000156 -INFO:local_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6583 -INFO:local_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6660 -INFO:local_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6799 -INFO:master_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6722 -INFO:local_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6668 -INFO:local_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6885 -INFO:local_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6790 -INFO:local_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6707 -INFO:local_logger:Epoch[104/800], Step[0000/0626], Avg Loss: 0.6680 - - --------------------------------------- -C++ Traceback (most recent call last): --------------------------------------- -0 paddle::platform::GpuMemcpySync(void*, void const*, unsigned long, cudaMemcpyKind) - ----------------------- -Error Message Summary: ----------------------- -FatalError: `Termination signal` is detected by the operating system. 
- [TimeInfo: *** Aborted at 1639995159 (unix time) try "date -d @1639995159" if you are using GNU date ***] - [SignalInfo: *** SIGTERM (@0x84e5) received by PID 25456 (TID 0x7f771efbe700) from PID 34021 ***] - - - --------------------------------------- -C++ Traceback (most recent call last): --------------------------------------- -0 paddle::platform::GpuMemcpySync(void*, void const*, unsigned long, cudaMemcpyKind) - ----------------------- -Error Message Summary: ----------------------- -FatalError: `Termination signal` is detected by the operating system. - [TimeInfo: *** Aborted at 1639995171 (unix time) try "date -d @1639995171" if you are using GNU date ***] - [SignalInfo: *** SIGTERM (@0x84e5) received by PID 25537 (TID 0x7fcf37fc6700) from PID 34021 ***] - -Traceback (most recent call last): - File "main_multi_gpu_pretrain.py", line 416, in - main() - File "main_multi_gpu_pretrain.py", line 412, in main - dist.spawn(main_worker, args=(config, dataset_train, ), nprocs=config.NGPUS) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 502, in spawn - while not context.join(): - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 312, in join - self._throw_exception(error_index) - File "/opt/conda/envs/py36/lib/python3.6/site-packages/paddle/distributed/spawn.py", line 320, in _throw_exception - (error_index, name)) -Exception: Process 7 terminated with signal SIGTERM. -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 14 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 14 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 14 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 14 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 14 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 14 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 14 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 14 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 20 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 20 leaked semaphores to clean up at shutdown - len(cache)) -/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 20 leaked semaphores to clean up at shutdown - len(cache)) 
-/opt/conda/envs/py36/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 20 leaked semaphores to clean up at shutdown - len(cache)) diff --git a/image_classification/MAE/random_erasing.py b/image_classification/MAE/random_erasing.py new file mode 100644 index 00000000..31eea465 --- /dev/null +++ b/image_classification/MAE/random_erasing.py @@ -0,0 +1,118 @@ +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Random Erasing for image tensor""" + +import random +import math +import paddle + + +def _get_pixels(per_pixel, rand_color, patch_size, dtype="float32"): + if per_pixel: + return paddle.normal(shape=patch_size).astype(dtype) + if rand_color: + return paddle.normal(shape=(patch_size[0], 1, 1)).astype(dtype) + return paddle.zeros((patch_size[0], 1, 1)).astype(dtype) + + +class RandomErasing(object): + """ + Args: + prob: probability of performing random erasing + min_area: Minimum percentage of erased area wrt input image area + max_area: Maximum percentage of erased area wrt input image area + min_aspect: Minimum aspect ratio of erased area + max_aspect: Maximum aspect ratio of erased area + mode: pixel color mode, in ['const', 'rand', 'pixel'] + 'const' - erase block is constant valued 0 for all channels + 'rand' - erase block is valued random color (same per-channel) + 'pixel' - erase block is valued random color per pixel + min_count: Minimum # of erasing blocks per image. + max_count: Maximum # of erasing blocks per image.
Area per box is scaled by count + per-image count is randomly chosen between min_count to max_count + """ + def __init__(self, prob=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None, + mode='const', min_count=1, max_count=None, num_splits=0): + self.prob = prob + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + mode = mode.lower() + self.rand_color = False + self.per_pixel = False + if mode == "rand": + self.rand_color = True + elif mode == "pixel": + self.per_pixel = True + else: + assert not mode or mode == "const" + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.prob: + return + area = img_h * img_w + count = self.min_count if self.min_count == self.max_count else \ + random.randint(self.min_count, self.max_count) + for _ in range(count): + for attempt in range(10): + target_area = random.uniform(self.min_area, self.max_area) * area / count + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top:top+h, left:left+w] = _get_pixels( + self.per_pixel, self.rand_color, (chan, h, w), + dtype=dtype) + break + + def __call__(self, input): + if len(input.shape) == 3: + self._erase(input, *input.shape, input.dtype) + else: + batch_size, chan, img_h, img_w = input.shape + batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input + + + +#def main(): +# re = RandomErasing(prob=1.0, min_area=0.2, max_area=0.6, mode='rand') +# #re = RandomErasing(prob=1.0, min_area=0.2, max_area=0.6, mode='const') +# #re = RandomErasing(prob=1.0, min_area=0.2, max_area=0.6, mode='pixel') +# import PIL.Image as Image +# import numpy as np +# paddle.set_device('cpu') +# img = paddle.to_tensor(np.asarray(Image.open('./lenna.png'))).astype('float32') +# img = img / 255.0 +# img = paddle.transpose(img, [2, 0, 1]) +# new_img = re(img) +# new_img = new_img * 255.0 +# new_img = paddle.transpose(new_img, [1, 2, 0]) +# new_img = new_img.cpu().numpy() +# new_img = Image.fromarray(new_img.astype('uint8')) +# new_img.save('./res.png') +# +# +# +#if __name__ == "__main__": +# main() diff --git a/image_classification/MAE/run_finetune.sh b/image_classification/MAE/run_finetune.sh deleted file mode 100644 index c4d60575..00000000 --- a/image_classification/MAE/run_finetune.sh +++ /dev/null @@ -1,8 +0,0 @@ -CUDA_VISIBLE_DEVICES=0 \ -python main_single_gpu_finetune.py \ --cfg='./configs/vit_base_patch16_224_finetune.yaml' \ --dataset='imagenet2012' \ --batch_size=8 \ --data_path='/dataset/imagenet' \ --amp \ --pretrained='./output/train-20211203-14-42-46/MAE-Epoch-10-Loss-0' diff --git a/image_classification/MAE/run_finetune_multi.sh b/image_classification/MAE/run_finetune_multi.sh index 719a5cd1..7d369a54 100644 --- a/image_classification/MAE/run_finetune_multi.sh +++ b/image_classification/MAE/run_finetune_multi.sh @@ -2,6 +2,7 @@ CUDA_VISIBLE_DEVICES=0,1 \ python main_multi_gpu_finetune.py \ -cfg='./configs/vit_base_patch16_224_finetune.yaml' \ -dataset='imagenet2012' \ --batch_size=8 \ +-batch_size=2 \ 
-data_path='/dataset/imagenet' \ -amp \ +-pretrained='./output/train-20220125-17-48-06/PRETRAIN-Epoch-99-Loss-0.5566961133140487' diff --git a/image_classification/MAE/run_linear_probe_multi.sh b/image_classification/MAE/run_linear_probe_multi.sh new file mode 100644 index 00000000..5d8ffd72 --- /dev/null +++ b/image_classification/MAE/run_linear_probe_multi.sh @@ -0,0 +1,8 @@ +CUDA_VISIBLE_DEVICES=0,1 \ +python main_multi_gpu_linearprobe.py \ +-cfg='./configs/vit_base_patch16_224_linearprobe.yaml' \ +-dataset='imagenet2012' \ +-batch_size=2 \ +-data_path='/dataset/imagenet' \ +-amp \ +-pretrained='./output/train-20220125-17-48-06/PRETRAIN-Epoch-99-Loss-0.5566961133140487' diff --git a/image_classification/MAE/run_pretrain.sh b/image_classification/MAE/run_pretrain.sh deleted file mode 100644 index 8c5b1b7b..00000000 --- a/image_classification/MAE/run_pretrain.sh +++ /dev/null @@ -1,8 +0,0 @@ -CUDA_VISIBLE_DEVICES=0 \ -python main_single_gpu_pretrain.py \ --cfg='./configs/vit_base_patch16_224_pretrain.yaml' \ --dataset='imagenet2012' \ --batch_size=8 \ --data_path='/dataset/imagenet' \ --mae_pretrain \ -#-amp diff --git a/image_classification/MAE/run_pretrain_multi.sh b/image_classification/MAE/run_pretrain_multi.sh index 6fb6b864..940fa6dd 100644 --- a/image_classification/MAE/run_pretrain_multi.sh +++ b/image_classification/MAE/run_pretrain_multi.sh @@ -1,8 +1,7 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4 \ +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ python main_multi_gpu_pretrain.py \ --cfg='./configs/vit_base_patch16_224_pretrain_dec1.yaml' \ +-cfg='./configs/vit_base_patch16_224_pretrain.yaml' \ -dataset='imagenet2012' \ --batch_size=8 \ +-batch_size=256 \ -data_path='/dataset/imagenet' \ --mae_pretrain \ -#-amp +-amp diff --git a/image_classification/MAE/run_pretrain_multi_resume.sh b/image_classification/MAE/run_pretrain_multi_resume.sh deleted file mode 100644 index 1ff2fd94..00000000 --- a/image_classification/MAE/run_pretrain_multi_resume.sh +++ /dev/null @@ -1,10 +0,0 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ -python main_multi_gpu_pretrain.py \ --cfg='./configs/vit_base_patch16_224_pretrain.yaml' \ --dataset='imagenet2012' \ --batch_size=256 \ --data_path='/dataset/imagenet' \ --resume='./output/train-20211210-08-41-14/MAE-Epoch-12-Loss-0.9377176860235059' \ --last_epoch=12 \ --mae_pretrain \ --amp diff --git a/image_classification/MAE/stat_define.py b/image_classification/MAE/stat_define.py deleted file mode 100644 index 963482d7..00000000 --- a/image_classification/MAE/stat_define.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import glob -import paddle -from config import get_config -from transformer import build_mae_pretrain as build_model - -def count_gelu(layer, inputs, output): - activation_flops = 8 - x = inputs[0] - num = x.numel() - layer.total_ops += num * activation_flops - - -def count_softmax(layer, inputs, output): - softmax_flops = 5 # max/substract, exp, sum, divide - x = inputs[0] - num = x.numel() - layer.total_ops += num * softmax_flops - - -def count_layernorm(layer, inputs, output): - layer_norm_flops = 5 # get mean (sum), get variance (square and sum), scale(multiply) - x = inputs[0] - num = x.numel() - layer.total_ops += num * layer_norm_flops - - -cfg = './configs/vit_large_patch32_384.yaml' -#input_size = (1, 3, 224, 224) -input_size = (1, 3, 384, 384) -config = get_config(cfg) -model = build_model(config) - -custom_ops = {paddle.nn.GELU: count_gelu, - paddle.nn.LayerNorm: count_layernorm, - paddle.nn.Softmax: count_softmax, - } -print(os.path.basename(cfg)) 
-paddle.flops(model, - input_size=input_size, - custom_ops=custom_ops, - print_detail=False) - - -#for cfg in glob.glob('./configs/*.yaml'): -# #cfg = './configs/swin_base_patch4_window7_224.yaml' -# input_size = (1, 3, int(cfg[-8:-5]), int(cfg[-8:-5])) -# config = get_config(cfg) -# model = build_model(config) -# -# -# custom_ops = {paddle.nn.GELU: count_gelu, -# paddle.nn.LayerNorm: count_layernorm, -# paddle.nn.Softmax: count_softmax, -# } -# print(os.path.basename(cfg)) -# paddle.flops(model, -# input_size=input_size, -# custom_ops=custom_ops, -# print_detail=False) -# print('-----------') diff --git a/image_classification/MAE/tests/__init__.py b/image_classification/MAE/tests/__init__.py deleted file mode 100644 index 84952a81..00000000 --- a/image_classification/MAE/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# init \ No newline at end of file diff --git a/image_classification/MAE/tests/test_config.py b/image_classification/MAE/tests/test_config.py deleted file mode 100644 index 6806e8a1..00000000 --- a/image_classification/MAE/tests/test_config.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest -import argparse -from config import update_config, get_config - -class ConfigTest(unittest.TestCase): - def setUp(self): - parser = argparse.ArgumentParser('') - parser.add_argument('-cfg', type=str, default=None) - parser.add_argument('-dataset', type=str, default="cifar10") - parser.add_argument('-batch_size', type=int, default=128) - parser.add_argument('-image_size', type=int, default=256) - parser.add_argument('-ngpus', type=int, default=None) - parser.add_argument('-data_path', type=str, default='/cifar10/') - parser.add_argument('-eval', action='store_false') # enable eval - parser.add_argument('-pretrained', type=str, default='pretrained') - parser.add_argument('-resume', type=str, default=None) - parser.add_argument('-last_epoch', type=int, default=None) - self.args = parser.parse_args() - - def tearDown(self): - pass - - def test_update_config(self): - config = get_config() - config = update_config(config, self.args) - - self.assertEqual(config.DATA.DATASET, 'cifar10') - self.assertEqual(config.DATA.BATCH_SIZE, 128) - self.assertEqual(config.DATA.IMAGE_SIZE, 256) - self.assertEqual(config.DATA.DATA_PATH, '/cifar10/') - self.assertEqual(config.EVAL, True) - self.assertEqual(config.DATA.BATCH_SIZE_EVAL, 128) - self.assertEqual(config.MODEL.PRETRAINED, 'pretrained') - - def test_update_config_from_file(self): - config = get_config() - self.args.cfg = './tests/test_config.yaml' - self.args.image_size = None - self.args.ngpus = None - config = update_config(config, self.args) - - self.assertEqual(config.DATA.IMAGE_SIZE, 384) - self.assertEqual(config.DATA.CROP_PCT, 1.0) - - self.assertEqual(config.MODEL.TRANS.PATCH_SIZE, 16) - self.assertEqual(config.MODEL.TRANS.EMBED_DIM, 768) - self.assertEqual(config.MODEL.TRANS.MLP_RATIO, 4.0) - self.assertEqual(config.MODEL.TRANS.DEPTH, 12) - 
self.assertEqual(config.MODEL.TRANS.NUM_HEADS, 12) - self.assertEqual(config.MODEL.TRANS.QKV_BIAS, True) - - self.assertEqual(config.MODEL.NAME, 'vit_base_patch16_224') - self.assertEqual(config.MODEL.TYPE, 'ViT') - - def test_get_config(self): - config1 = get_config() - config2 = get_config() - self.assertEqual(config1, config2) diff --git a/image_classification/MAE/tests/test_config.yaml b/image_classification/MAE/tests/test_config.yaml deleted file mode 100644 index 19709906..00000000 --- a/image_classification/MAE/tests/test_config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -DATA: - IMAGE_SIZE: 384 - CROP_PCT: 1.0 -MODEL: - TYPE: ViT - NAME: vit_base_patch16_224 - TRANS: - PATCH_SIZE: 16 - EMBED_DIM: 768 - MLP_RATIO: 4.0 - DEPTH: 12 - NUM_HEADS: 12 - QKV_BIAS: true - diff --git a/image_classification/MAE/tests/test_datasets.py b/image_classification/MAE/tests/test_datasets.py deleted file mode 100644 index 79952137..00000000 --- a/image_classification/MAE/tests/test_datasets.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest -import argparse -from config import * -from datasets import * -from paddle.io import DataLoader -#from multiprocessing import SimpleQueue - -#paddle.set_device('cpu') - -class DatasetTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - parser = argparse.ArgumentParser('') - parser.add_argument('-cfg', type=str, default=None) - parser.add_argument('-dataset', type=str, default='imagenet2012') - parser.add_argument('-batch_size', type=int, default=4) - parser.add_argument('-image_size', type=int, default=224) - parser.add_argument('-ngpus', type=int, default=None) - parser.add_argument('-data_path', type=str, default='/dataset/imagenet') - parser.add_argument('-eval', action='store_true') - parser.add_argument('-pretrained', type=str, default=None) - parser.add_argument('-resume', type=str, default=None) - parser.add_argument('-last_epoch', type=int, default=None) - cls.args = parser.parse_args() - cls.config = get_config() - cls.config = update_config(cls.config, cls.args) - - cls.dataset_train = get_dataset(DatasetTest.config, mode='train') - cls.dataset_test = get_dataset(DatasetTest.config, mode='val') - - @classmethod - def tearDown(cls): - pass - - @unittest.skip('skip for debug') - def test_shape(self): - sample = next(iter(DatasetTest.dataset_train)) - self.assertEqual([3, 224, 224], sample[0].shape) - - sample = next(iter(DatasetTest.dataset_test)) - self.assertEqual([3, 224, 224], sample[0].shape) - - @unittest.skip('skip for debug') - def test_scaling(self): - sample = next(iter(DatasetTest.dataset_train))[0] - self.assertTrue(paddle.any(sample < 0)) - self.assertTrue(paddle.any(sample > 0)) - self.assertGreaterEqual(1, sample.max().cpu().numpy()) - self.assertLessEqual(-1, sample.min().cpu().numpy()) - - sample = next(iter(DatasetTest.dataset_test))[0] - self.assertGreaterEqual(1, sample.max().cpu().numpy()) - self.assertLessEqual(-1, 
sample.min().cpu().numpy()) - self.assertTrue(paddle.any(sample < 0)) - self.assertTrue(paddle.any(sample > 0)) - - @unittest.skip('skip for debug') - def test_single_process_dataloader(self): - self._test_loader(DatasetTest.dataset_train, 'train', False) - self._test_loader(DatasetTest.dataset_test, 'test', False) - - def _test_loader(self, dataset, mode, multi_process): - dataloader = get_dataloader(DatasetTest.config, - dataset, - mode=mode, - multi_process=multi_process) - for idx, _ in enumerate(dataloader): - if idx > 0 and idx % 1 == 0: - print(f'----- test single process dataloader: {idx}/{len(dataloader)}') - if idx == 10: - return - - @unittest.skip('skip for debug') - def test_multi_process_dataloader(self): - tester = Tester() - tester.run() - self.assertEqual(tester.n_samples, 50000) - - - - -class Tester: - def __init__(self): - parser = argparse.ArgumentParser('') - parser.add_argument('-cfg', type=str, default=None) - parser.add_argument('-dataset', type=str, default='imagenet2012') - parser.add_argument('-batch_size', type=int, default=256) - parser.add_argument('-image_size', type=int, default=224) - parser.add_argument('-data_path', type=str, default='/dataset/imagenet/') - parser.add_argument('-eval', action='store_false') # set test batch size - parser.add_argument('-pretrained', type=str, default=None) - args = parser.parse_args() - self.config = get_config() - self.config = update_config(self.config, args) - self.dataset_train = get_dataset(self.config, mode='train') - self.dataset_test = get_dataset(self.config, mode='val') - self.n_samples = 0 - - def run(self, mode='test'): - # https://github.com/PaddlePaddle/Paddle/blob/5d8e4395b61929627151f6fd4a607589288a78bf/python/paddle/distributed/spawn.py#L272 - context = dist.spawn(self.main_worker, args=(mode,)) - self.n_samples = context.return_queues[0].get() - print(f'----- total samples: {self.n_samples}') - - def main_worker(self, *args): - mode = args[0] - dist.init_parallel_env() - local_rank = dist.get_rank() - if mode == 'train': - n_samples = self._test_loader(self.config, self.dataset_train, 'train', True) - else: - n_samples = self._test_loader(self.config, self.dataset_test, 'test', True) - - n_samples = paddle.to_tensor(np.array([n_samples])) - dist.reduce(n_samples, 0) - if local_rank == 0: - return n_samples.cpu().numpy() - - - def _test_loader(self, config, dataset, mode, multi_process): - n_samples = 0 - dataloader = get_dataloader(config, - dataset, - mode=mode, - multi_process=multi_process) - local_rank = dist.get_rank() - for idx, data in enumerate(dataloader): - if idx > 0 and idx % 1 == 0: - print(f'----- test single process({local_rank}) dataloader: {idx}/{len(dataloader)}') - #print(local_rank, data[1]) - n_samples += data[0].shape[0] - - return n_samples diff --git a/image_classification/MAE/tests/test_transformer.py b/image_classification/MAE/tests/test_transformer.py deleted file mode 100644 index bbfefc49..00000000 --- a/image_classification/MAE/tests/test_transformer.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest -import numpy as np -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -from config import * -from transformer import build_mae_pretrain -from transformer import PatchEmbedding -from transformer import Attention -from transformer import Mlp -from transformer import Encoder - - -class TransformerTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - paddle.set_device('cpu') - cls.config = get_config() - cls.dummy_img = np.random.randn(4, 3, 224, 224).astype('float32') - cls.dummy_tensor = paddle.to_tensor(cls.dummy_img) - cls.mae = build_mae_pretrain(cls.config) - cls.mae.train() - - @classmethod - def tearDown(cls): - pass - - # @unittest.skip('skip for debug') - def test_out_shape(self): - reconstruct, mask = TransformerTest.mae(TransformerTest.dummy_tensor) - self.assertEqual(reconstruct.shape, [4, 49, 768]) - self.assertEqual(mask.shape, [4, 49, 768]) - - @unittest.skip('skip for debug') - def test_all_parameters_updated(self): - optim = paddle.optimizer.SGD(parameters=TransformerTest.mae.parameters(), learning_rate=0.1) - reconstruct, masked_image = TransformerTest.mae(TransformerTest.dummy_tensor) - loss = F.mse_loss(reconstruct, masked_image) - loss.backward() - - for name, param in TransformerTest.mae.named_parameters(): - if not param.stop_gradient: - self.assertIsNotNone(param.gradient()) - # self.assertNotEqual(0, np.sum(param.gradient() ** 2)) - - # @unittest.skip('skip for debug') - def test_embeddings(self): - embed = PatchEmbedding() - dummy_img = np.random.randn(4, 3, 224, 224).astype('float32') - dummy_tensor = paddle.to_tensor(dummy_img) - - patch_out = embed.patch_embedding(dummy_tensor) - embed_out = embed(dummy_tensor) - self.assertEqual(patch_out.shape, [4, 768, 14, 14]) - self.assertEqual(embed.cls_token.shape, [1, 1, 768]) - self.assertEqual(embed_out.shape, [4, 14 * 14 + 1, 768]) - - # @unittest.skip('skip for debug') - def test_attention(self): - attn_op = Attention( - TransformerTest.config.MODEL.TRANS.ENCODER.EMBED_DIM, - TransformerTest.config.MODEL.TRANS.ENCODER.NUM_HEADS, - TransformerTest.config.MODEL.TRANS.QKV_BIAS) - dummy_img = np.random.randn(4, 50, 768).astype('float32') - dummy_tensor = paddle.to_tensor(dummy_img) - - out, attn = attn_op(dummy_tensor) - self.assertEqual(attn.shape, [4, 12, 50, 50]) - self.assertEqual(out.shape, [4, 50, 768]) - - def test_mlp(self): - mlp_op = Mlp( - TransformerTest.config.MODEL.TRANS.ENCODER.EMBED_DIM, - TransformerTest.config.MODEL.TRANS.MLP_RATIO) - dummy_img = np.random.randn(4, 50, 768).astype('float32') - dummy_tensor = paddle.to_tensor(dummy_img) - - out = mlp_op(dummy_tensor) - self.assertEqual(out.shape, [4, 50, 768]) - - def test_position_embedding_not_update(self): - origin = TransformerTest.mae.position_embedding.get_encoder_embedding().clone() - optim = paddle.optimizer.SGD(parameters=TransformerTest.mae.parameters(), learning_rate=0.1) - reconstruct, masked_image = TransformerTest.mae(TransformerTest.dummy_tensor) - loss = F.mse_loss(reconstruct, masked_image) - loss.backward() - optim.step() - update = 
TransformerTest.mae.position_embedding.get_encoder_embedding().clone() - self.assertTrue((origin.numpy() == update.numpy()).all()) - - def test_encoder(self): - encoder_op = Encoder( - TransformerTest.config.MODEL.TRANS.ENCODER.EMBED_DIM, - TransformerTest.config.MODEL.TRANS.ENCODER.NUM_HEADS, - TransformerTest.config.MODEL.TRANS.ENCODER.DEPTH, - ) - dummy_img = np.random.randn(4, 50, 768).astype('float32') - dummy_tensor = paddle.to_tensor(dummy_img) - - out, _ = encoder_op(dummy_tensor) - self.assertEqual(out.shape, [4, 50, 768]) diff --git a/image_classification/MAE/tests/test_utils.py b/image_classification/MAE/tests/test_utils.py deleted file mode 100644 index 49366af4..00000000 --- a/image_classification/MAE/tests/test_utils.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest -import paddle -import paddle.nn as nn -from utils import AverageMeter -from utils import WarmupCosineScheduler -from utils import get_exclude_from_weight_decay_fn - - -class UtilTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - pass - - @classmethod - def tearDown(cls): - pass - - def test_average_meter(self): - meter = AverageMeter() - for i in range(1, 101): - meter.update(i, 1) - self.assertEqual(meter.avg, 50.5) - - def test_warmup_cosine_scheduler(self): - sch = WarmupCosineScheduler(learning_rate=0.1, - warmup_start_lr=1e-5, - start_lr=0.1, - end_lr=0.0, - warmup_epochs=10, - total_epochs=100, - last_epoch=-1) - lrs = [] - for epoch in range(100): - lr = sch.get_lr() - lrs.append(lr) - sch.step() - lrs.append(sch.get_lr()) - - self.assertEqual(lrs[0], 1e-5) - self.assertEqual(lrs[10], 0.1) - self.assertEqual(lrs[-1], 0.0) - self.assertGreaterEqual(min(lrs[0:10]), 1e-5) - self.assertLessEqual(max(lrs[0:10]), 0.1) - self.assertGreaterEqual(min(lrs[10::]), 0.0) - self.assertLessEqual(max(lrs[10::]), 0.1) - - def test_warmup_cosine_scheduler_last_epoch(self): - sch = WarmupCosineScheduler(learning_rate=0.1, - warmup_start_lr=1e-5, - start_lr=0.1, - end_lr=0.0, - warmup_epochs=10, - total_epochs=100, - last_epoch=9) - lrs = [] - for epoch in range(10, 100): - lr = sch.get_lr() - lrs.append(lr) - sch.step() - lrs.append(sch.get_lr()) - - self.assertEqual(lrs[0], 0.1) - self.assertEqual(lrs[-1], 0.0) - self.assertGreaterEqual(min(lrs[::]), 0.0) - self.assertLessEqual(max(lrs[::]), 0.1) - - def test_get_exclude_from_weight_decay_fn(self): - model = nn.Linear(10, 100, bias_attr=True) - exclude_list = ['bias'] - fn = get_exclude_from_weight_decay_fn(exclude_list) - # should return false if name in exclude_list - for name, param in model.named_parameters(): - if name.endswith('weight'): - self.assertTrue(fn(name)) - elif name.endswith('bias'): - self.assertFalse(fn(name)) diff --git a/image_classification/MAE/transformer.py b/image_classification/MAE/transformer.py index 62704ed8..0fadf67f 100644 --- a/image_classification/MAE/transformer.py +++ b/image_classification/MAE/transformer.py @@ -26,59 
+26,28 @@ from config import get_config -def get_position_encoding(seq_len, embed_dim): - """ sinusoid position encoding table""" - def get_position_angle_vec(embed_dim, position): - return [position / np.power(10000, 2 * (hid_j // 2) / embed_dim) for hid_j in range(embed_dim)] - - sinusoid_table = np.array([get_position_angle_vec(embed_dim, pos_i) for pos_i in range(seq_len)]) - sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i - sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 - position_embedding = paddle.to_tensor([sinusoid_table]) - return position_embedding - - class Identity(nn.Layer): """ Identity layer The output of this layer is the input without any change. Use this layer to avoid using 'if' condition in forward methods """ def __init__(self): - super().__init__() + super(Identity, self).__init__() def forward(self, x): return x -class PositionalEmbedding(nn.Layer): - """Position Embedding - - Apply positional embedding on input images. - - Attributes: - position_embedding: sine-cosine version positional embedding - """ - def __init__(self, embed_dim, seq_len=197): - """ Sinusoid position encoding table """ - super().__init__() - self.seq_len = seq_len - - def get_position_angle_vec(embed_dim, position): - return [position / np.power(10000, 2 * (hid_j // 2) / embed_dim) for hid_j in range(embed_dim)] - - sinusoid_table = np.array([get_position_angle_vec( - embed_dim, pos_i) for pos_i in range(seq_len)]) - sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i - sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 - position_embedding = paddle.to_tensor([sinusoid_table]) - - self.register_buffer('position_embedding', - position_embedding) +def get_position_encoding(seq_len, embed_dim): + """ sinusoid position encoding table""" + def get_position_angle_vec(embed_dim, position): + return [position / np.power(10000, 2 * (hid_j // 2) / embed_dim) for hid_j in range(embed_dim)] - def get_positional_embedding(self, seq_length=None): - if seq_length is None: - seq_length = self.seq_len - return self.position_embedding[:, :seq_length, :] + sinusoid_table = np.array([get_position_angle_vec(embed_dim, pos_i) for pos_i in range(seq_len)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + position_embedding = paddle.to_tensor([sinusoid_table]) + return position_embedding class PatchEmbedding(nn.Layer): @@ -98,29 +67,19 @@ def __init__(self, embed_dim=768, dropout=0.): super().__init__() - n_patches = (image_size // patch_size) * (image_size // patch_size) - + self.n_patches = (image_size // patch_size) * (image_size // patch_size) self.patch_embedding = nn.Conv2D(in_channels=in_channels, out_channels=embed_dim, kernel_size=patch_size, stride=patch_size) - - self.cls_token = paddle.create_parameter( - shape=[1, 1, embed_dim], - dtype='float32', - default_initializer=paddle.nn.initializer.Constant(0)) - self.dropout = nn.Dropout(dropout) def forward(self, x): - cls_tokens = self.cls_token.expand( - (x.shape[0], -1, -1)) x = self.patch_embedding(x) x = x.flatten(2) x = x.transpose([0, 2, 1]) - x = paddle.concat((cls_tokens, x), axis=1) - embeddings = self.dropout(x) - return embeddings + x = self.dropout(x) + return x class Attention(nn.Layer): @@ -140,6 +99,7 @@ class Attention(nn.Layer): proj_dropout: final dropout before output softmax: softmax op for attention """ + def __init__(self, embed_dim, num_heads, @@ -211,9 +171,9 @@ class 
Mlp(nn.Layer): fc1: nn.Linear fc2: nn.Linear act: GELU - dropout1: dropout after fc1 - dropout2: dropout after fc2 + dropout: dropout after fc """ + def __init__(self, embed_dim, mlp_ratio, @@ -231,8 +191,7 @@ def __init__(self, weight_attr=w_attr_2, bias_attr=b_attr_2) self.act = nn.GELU() - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) + self.dropout = nn.Dropout(dropout) def _init_weights(self): weight_attr = paddle.ParamAttr( @@ -244,9 +203,9 @@ def _init_weights(self): def forward(self, x): x = self.fc1(x) x = self.act(x) - x = self.dropout1(x) + x = self.dropout(x) x = self.fc2(x) - x = self.dropout2(x) + x = self.dropout(x) return x @@ -262,6 +221,7 @@ class TransformerLayer(nn.Layer): mlp: mlp modual attn: attention modual """ + def __init__(self, embed_dim, num_heads, @@ -271,26 +231,22 @@ def __init__(self, attention_dropout=0., droppath=0.): super().__init__() - w_attr_1, b_attr_1 = self._init_weights() self.attn_norm = nn.LayerNorm(embed_dim, weight_attr=w_attr_1, bias_attr=b_attr_1, epsilon=1e-6) - self.attn = Attention(embed_dim, num_heads, qkv_bias, dropout, attention_dropout) self.drop_path = DropPath(droppath) if droppath > 0. else Identity() - w_attr_2, b_attr_2 = self._init_weights() self.mlp_norm = nn.LayerNorm(embed_dim, weight_attr=w_attr_2, bias_attr=b_attr_2, epsilon=1e-6) - self.mlp = Mlp(embed_dim, mlp_ratio, dropout) def _init_weights(self): @@ -321,8 +277,9 @@ class Encoder(nn.Layer): Attributes: layers: nn.LayerList contains multiple TransformerLayers - encoder_norm: nn.LayerNorm which is applied after last encoder layer + norm: nn.LayerNorm which is applied after last encoder layer """ + def __init__(self, embed_dim, num_heads, @@ -331,28 +288,30 @@ def __init__(self, mlp_ratio=4.0, dropout=0., attention_dropout=0., - droppath=0.): - super().__init__() + droppath=0., + has_norm=True): + super(Encoder, self).__init__() # stochatic depth decay depth_decay = [x.item() for x in paddle.linspace(0, droppath, depth)] layer_list = [] for i in range(depth): layer_list.append(TransformerLayer(embed_dim, - num_heads, - qkv_bias, - mlp_ratio, - dropout, - attention_dropout, - droppath=depth_decay[i])) - # new paddle version fix this, deepcopy is no longer needed - # layer_list.append(copy.deepcopy(encoder_layer)) + num_heads, + qkv_bias, + mlp_ratio, + dropout, + attention_dropout, + droppath=depth_decay[i])) self.layers = nn.LayerList(layer_list) - w_attr, b_attr = self._init_weights() - self.encoder_norm = nn.LayerNorm(embed_dim, - weight_attr=w_attr, - bias_attr=b_attr, - epsilon=1e-6) + # move this norm out to upper level for global_pool (no cls_token settings) + self.has_norm = has_norm + if has_norm: + w_attr, b_attr = self._init_weights() + self.norm = nn.LayerNorm(embed_dim, + weight_attr=w_attr, + bias_attr=b_attr, + epsilon=1e-6) def _init_weights(self): weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(1.0)) @@ -362,8 +321,10 @@ def _init_weights(self): def forward(self, x): for layer in self.layers: x = layer(x) - out = self.encoder_norm(x) - return out + + if self.has_norm: + x = self.norm(x) + return x class Decoder(nn.Layer): @@ -373,7 +334,7 @@ class Decoder(nn.Layer): Attributes: layers: nn.LayerList contains multiple TransformerLayers - decoder_norm: nn.LayerNorm which is applied after last encoder layer + norm: nn.LayerNorm which is applied after last encoder layer """ def __init__(self, @@ -385,7 +346,7 @@ def __init__(self, dropout=0., attention_dropout=0., droppath=0.): - super().__init__() + 
super(Decoder, self).__init__() # stochatic depth decay depth_decay = [x.item() for x in paddle.linspace(0, droppath, depth)] @@ -398,29 +359,23 @@ def __init__(self, dropout, attention_dropout, droppath=depth_decay[i])) - # new paddle version fix this, deepcopy is no longer needed - # layer_list.append(copy.deepcopy(encoder_layer)) self.layers = nn.LayerList(layer_list) w_attr, b_attr = self._init_weights() - self.decoder_norm = nn.LayerNorm(embed_dim, - weight_attr=w_attr, - bias_attr=b_attr, - epsilon=1e-6) + self.norm = nn.LayerNorm(embed_dim, + weight_attr=w_attr, + bias_attr=b_attr, + epsilon=1e-6) def _init_weights(self): weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(1.0)) bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.0)) return weight_attr, bias_attr - def forward(self, x, mask_len=0): + def forward(self, x): for layer in self.layers: x = layer(x) - if mask_len > 0: - # only sustain masked patches - out = self.decoder_norm(x[:, -mask_len:]) - else: - out = self.decoder_norm(x) + out = self.norm(x) return out @@ -461,57 +416,75 @@ def __init__(self, qkv_bias=True, dropout=0., attention_dropout=0., - droppath=0.): + droppath=0., + norm_pix_loss=False): super().__init__() - self.patch_size = patch_size self.num_patches = (image_size // patch_size) * (image_size // patch_size) - self.mask_token = paddle.create_parameter( - shape=[1, 1, decoder_embed_dim], + self.patch_size = patch_size + # -------------------- Encoder -------------------- + self.patch_embedding = PatchEmbedding( + image_size, + patch_size, + in_channels, + encoder_embed_dim, + dropout) + + self.cls_token = paddle.create_parameter( + shape=[1, 1, encoder_embed_dim], dtype='float32', - default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) - self.perm = None - self.mask_num = None - # create positional embedding - self.encoder_position_embedding = get_position_encoding(seq_len=1 + self.num_patches, - embed_dim=encoder_embed_dim) - self.decoder_position_embedding = get_position_encoding(seq_len=1 + self.num_patches, - embed_dim=decoder_embed_dim) - # create patch embedding with positional embedding - self.patch_embedding = PatchEmbedding(image_size, - patch_size, - in_channels, - encoder_embed_dim, - dropout) - # create multi head self-attention encoder - self.encoder = Encoder(encoder_embed_dim, - encoder_num_heads, - encoder_depth, - qkv_bias, - mlp_ratio, - dropout, - attention_dropout, - droppath) + default_initializer=paddle.nn.initializer.Constant(0)) + + self.encoder_position_embedding = get_position_encoding( + seq_len=1 + self.num_patches, + embed_dim=encoder_embed_dim) + + self.encoder = Encoder( + encoder_embed_dim, + encoder_num_heads, + encoder_depth, + qkv_bias, + mlp_ratio, + dropout, + attention_dropout, + droppath) + + # -------------------- Decoder -------------------- # the embed_dim is different in encoder and decoder, so add a linear layer w_attr_1, b_attr_1 = self._init_weights() - self.linear_projection = nn.Linear(encoder_embed_dim, - decoder_embed_dim, - weight_attr=w_attr_1, - bias_attr=b_attr_1) - # create multi head self-attention decoder - self.decoder = Decoder(decoder_embed_dim, - decoder_num_heads, - decoder_depth, - qkv_bias, - mlp_ratio, - dropout, - attention_dropout, - droppath) + self.linear_projection = nn.Linear( + encoder_embed_dim, + decoder_embed_dim, + weight_attr=w_attr_1, + bias_attr=b_attr_1) + + self.mask_token = paddle.create_parameter( + shape=[1, 1, decoder_embed_dim], + dtype='float32', + 
default_initializer=paddle.nn.initializer.Constant(0)) + + self.decoder_position_embedding = get_position_encoding( + seq_len=1 + self.num_patches, + embed_dim=decoder_embed_dim) + + self.decoder = Decoder( + decoder_embed_dim, + decoder_num_heads, + decoder_depth, + qkv_bias, + mlp_ratio, + dropout, + attention_dropout, + droppath) + # create reconstruction layer w_attr_2, b_attr_2 = self._init_weights() - self.reconstruction_layer = nn.Linear(decoder_embed_dim, - in_channels * patch_size * patch_size, - weight_attr=w_attr_2, - bias_attr=b_attr_2) + self.decoder_pred = nn.Linear( + decoder_embed_dim, + in_channels * patch_size * patch_size, + weight_attr=w_attr_2, + bias_attr=b_attr_2) + + self.norm_pix_loss = norm_pix_loss def _init_weights(self): @@ -520,38 +493,109 @@ def _init_weights(self): initializer=paddle.nn.initializer.Constant(0.0)) return weight_attr, bias_attr - def forward(self, x, masks): - # x: [B, C, H, W] - x = self.patch_embedding(x) - # x: [B, num_patches, embed_dim] - B, N, C = x.shape # B: batch_size, N: num_patches, C: embed_dim - # mask: [B, num_patches], visible set to 0, masked set to 1 - - # add pos embed - x += self.encoder_position_embedding.clone().detach() - # get no mask patches - no_mask_x = x[~masks] # [B*0.25*L, embed_dim] - # index slicing needs reshape back in paddle: [B, 0.25L, embed_dim] - no_mask_x = no_mask_x.reshape([B, -1, C]) - # encoder - enc_out = self.encoder(no_mask_x) - # encoder to decoder linear proj - enc_out = self.linear_projection(enc_out) - # shuffle the position embedding is equivalent to unshuffling tokens - expand_pos_embed = self.decoder_position_embedding.expand([B, -1, -1]).clone().detach() - pos_embed_no_mask = expand_pos_embed[~masks].reshape([B, -1, enc_out.shape[-1]]) - pos_embed_mask = expand_pos_embed[masks].reshape([B, -1, enc_out.shape[-1]]) - # dec in put, here use broadcasting for mask_token - dec_in = paddle.concat([enc_out + pos_embed_no_mask, self.mask_token + pos_embed_mask], axis=1) - # decoder - mask_len = pos_embed_mask.shape[1] - dec_out = self.decoder(dec_in, mask_len) - # reconstruct patches - output = self.reconstruction_layer(dec_out) - return output - - -class MAEFinetuneTransformer(nn.Layer): + def patchify(self, images): + n_patches = images.shape[2] // self.patch_size + x = images.reshape([images.shape[0], # N + images.shape[1], # C + n_patches, # h + self.patch_size, # p + n_patches, # w + self.patch_size]) # p + x = x.transpose([0, 2, 4, 3, 5, 1]) + x = x.reshape([images.shape[0], n_patches * n_patches, -1]) + return x + + def unpatchify(self, x): + n_patches = int(x.shape[1]**.5) + + x = x.reshape([x.shape[0], # N + n_patches, # h + n_patches, # w + self.patch_size, # p + self.patch_size, # p + -1]) # C + x = x.transpose([0, 5, 1, 3, 2, 4]) + x = x.reshape([x.shape[0], -1, n_patches * self.patch_size, n_patches * self.patch_size]) + return x + + def random_masking(self, x, mask_ratio): + """ + Shuffle x then mask the last few tokens according to mask ratio.
+ Args: + x: tensor of [batch, seq_len, encoder_embed_dim] + mask_ratio: float, masking ratio + Returns: + masked_x: tensor of [batch, seq_len - mask_num, encoder_embed_dim] + """ + batch_size, seq_len, embed_dim = x.shape + keep_len = int(seq_len * (1 - mask_ratio)) + rand_probs = paddle.rand([batch_size, seq_len]) + shuffle_ids = paddle.argsort(rand_probs, axis=-1) + restore_ids = paddle.argsort(shuffle_ids, axis=-1) + + keep_ids = shuffle_ids[:, :keep_len] + + ids = keep_ids + (paddle.arange(batch_size) * seq_len).unsqueeze(-1).expand([batch_size, -1]) + x_masked = paddle.gather(x.flatten(0, 1), index=ids.flatten(), axis=0).reshape([batch_size, keep_len, -1]) + + mask = paddle.ones([batch_size, seq_len]) + mask[:, :keep_len] = 0 + + restore_ids_expand = restore_ids + (paddle.arange(batch_size) * seq_len).unsqueeze(-1).expand([batch_size, -1]) + mask = paddle.gather(mask.flatten(), index=restore_ids_expand.flatten()).reshape([batch_size, seq_len]) + return x_masked, mask, restore_ids + + def forward_encoder(self, images, mask_ratio): + x = self.patch_embedding(images) + # add pos embed w/o cls token + x = x + self.encoder_position_embedding[:, 1:, :] + # masking + x, mask, ids_restore = self.random_masking(x, mask_ratio) + # append cls token + cls_token = self.cls_token + self.encoder_position_embedding[:, :1, :] + cls_tokens = cls_token.expand((x.shape[0], -1, -1)) + x = paddle.concat((cls_tokens, x), axis=1) + x = self.encoder(x) + return x, mask, ids_restore + + def forward_decoder(self, x, ids_restore): + x = self.linear_projection(x) # [batch, keep_len+1(cls_token), decoder_embed_dim] + # self.mask_token: [1, 1, decoder_embed_dim] + # ids_store: [batch, num_patches] + # mask_tokens: [batch, masked_len, decoder_embed_dim] + mask_tokens = self.mask_token.expand([x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], -1]) + # x_: [batch, num_patches, decoder_embed_dim] + x_ = paddle.concat([x[:, 1:, :], mask_tokens], axis=1) # no cls token + x_shape = x_.shape + x_ = paddle.gather(x_.flatten(0, 1), index=ids_restore.flatten()).reshape(x_shape) + x = paddle.concat([x[:, :1, :], x_], axis=1) # append cls token + + x = x + self.decoder_position_embedding + x = self.decoder(x) + x = self.decoder_pred(x) + x = x[:, 1:, :] + + return x + + def forward_loss(self, images, pred, mask): + target = self.patchify(images) + if self.norm_pix_loss: + mean = target.mean(axis=-1, keepdim=True) + var = target.var(axis=-1, keepdim=True) + target = (target - mean) / (var + 1.e-6) ** 0.5 + loss = (pred - target) ** 2 + loss = loss.mean(axis=-1) # mean loss per patch + loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches + return loss + + def forward(self, images, mask_ratio=0.75): + encoder_out, mask, restore_ids = self.forward_encoder(images, mask_ratio) + decoder_out = self.forward_decoder(encoder_out, restore_ids) + loss = self.forward_loss(images, decoder_out, mask) + return loss, decoder_out, mask + + +class MAETransformer(nn.Layer): """ViT transformer ViT Transformer, classifier is a single Linear layer for finetune, @@ -583,20 +627,27 @@ def __init__(self, num_heads=12, mlp_ratio=4, qkv_bias=True, + global_pool=False, dropout=0., attention_dropout=0., droppath=0.): super().__init__() - self.num_patches = (image_size // patch_size) * (image_size // patch_size) - # create positional embedding - self.encoder_position_embedding = get_position_encoding(seq_len=1 + self.num_patches, - embed_dim=embed_dim) + self.global_pool = global_pool # create patch embedding with positional embedding 
self.patch_embedding = PatchEmbedding(image_size, patch_size, in_channels, embed_dim, dropout) + # create positional embedding + self.position_embedding = get_position_encoding( + seq_len=1 + self.patch_embedding.n_patches, + embed_dim=embed_dim) + # create class token + self.cls_token = paddle.create_parameter( + shape=[1, 1, embed_dim], + dtype='float32', + default_initializer=paddle.nn.initializer.Constant(0)) # create multi head self-attention encoder self.encoder = Encoder(embed_dim, num_heads, @@ -605,30 +656,58 @@ def __init__(self, mlp_ratio, dropout, attention_dropout, - droppath) + droppath, + has_norm=False) + # define encoder norm here to aviod cls_token (when global_pool is True) + w_attr, b_attr = self._init_weights_norm() + self.encoder_norm = nn.LayerNorm(embed_dim, + weight_attr=w_attr, + bias_attr=b_attr, + epsilon=1e-6) # classifier head (for finetuning) - w_attr_1, b_attr_1 = self._init_weights() + w_attr_1, b_attr_1 = self._init_weights_linear() self.classifier = nn.Linear(embed_dim, num_classes, weight_attr=w_attr_1, bias_attr=b_attr_1) - def forward(self, x): + + def forward_features(self, x): x = self.patch_embedding(x) - # add pos embed - x += self.encoder_position_embedding.clone().detach() + cls_tokens = self.cls_token.expand((x.shape[0], -1, -1)) + x = paddle.concat((cls_tokens, x), axis=1) + x = x + self.position_embedding x = self.encoder(x) - logits = self.classifier(x[:, 0]) # take only cls_token as classifier + + if self.global_pool: + x = x[:, 1:, :].mean(axis=1) # global poll w/o cls_token + out = self.encoder_norm(x) + else: + x = self.encoder_norm(x) + out = x[:, 0] # return cls_token only + + return out + + def forward(self, x): + x = self.forward_features(x) + logits = self.classifier(x) + return logits - def _init_weights(self): + def _init_weights_norm(self): + weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(1.0)) + bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.0)) + return weight_attr, bias_attr + + def _init_weights_linear(self): weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0)) return weight_attr, bias_attr def build_mae_pretrain(config): + """ build MAE vit model for pretraining""" model = MAEPretrainTransformer(image_size=config.DATA.IMAGE_SIZE, patch_size=config.MODEL.TRANS.PATCH_SIZE, in_channels=3, @@ -642,20 +721,23 @@ def build_mae_pretrain(config): qkv_bias=config.MODEL.TRANS.QKV_BIAS, dropout=config.MODEL.DROPOUT, attention_dropout=config.MODEL.ATTENTION_DROPOUT, - droppath=config.MODEL.DROPPATH) + droppath=config.MODEL.DROPPATH, + norm_pix_loss=config.MODEL.TRANS.NORM_PIX_LOSS) return model -def build_mae_finetune(config): - model = MAEFinetuneTransformer(image_size=config.DATA.IMAGE_SIZE, - patch_size=config.MODEL.TRANS.PATCH_SIZE, - in_channels=3, - embed_dim=config.MODEL.TRANS.ENCODER.EMBED_DIM, - depth=config.MODEL.TRANS.ENCODER.DEPTH, - num_heads=config.MODEL.TRANS.ENCODER.NUM_HEADS, - mlp_ratio=config.MODEL.TRANS.MLP_RATIO, - qkv_bias=config.MODEL.TRANS.QKV_BIAS, - dropout=config.MODEL.DROPOUT, - attention_dropout=config.MODEL.ATTENTION_DROPOUT, - droppath=config.MODEL.DROPPATH) +def build_transformer(config): + """ build vit model for finetuning and linear probing""" + model = MAETransformer(image_size=config.DATA.IMAGE_SIZE, + patch_size=config.MODEL.TRANS.PATCH_SIZE, + in_channels=3, + embed_dim=config.MODEL.TRANS.ENCODER.EMBED_DIM, + 
depth=config.MODEL.TRANS.ENCODER.DEPTH, + num_heads=config.MODEL.TRANS.ENCODER.NUM_HEADS, + mlp_ratio=config.MODEL.TRANS.MLP_RATIO, + qkv_bias=config.MODEL.TRANS.QKV_BIAS, + global_pool=config.MODEL.GLOBAL_POOL, + dropout=config.MODEL.DROPOUT, + attention_dropout=config.MODEL.ATTENTION_DROPOUT, + droppath=config.MODEL.DROPPATH) return model diff --git a/image_classification/MAE/utils.py b/image_classification/MAE/utils.py index 44800527..eae144dc 100644 --- a/image_classification/MAE/utils.py +++ b/image_classification/MAE/utils.py @@ -20,8 +20,95 @@ """ import math +import numpy as np +import paddle from paddle.optimizer.lr import LRScheduler +def get_params_groups(model): + regularized = [] + not_regularized = [] + for name, param in model.named_parameters(): + if param.stop_gradient: + continue + # do not regularize biases and norm params + if name.endswith(".bias") or len(param.shape) == 1: + not_regularized.append(param) + else: + regularized.append(param) + return [{'params': regularized}, {'params': not_regularized, 'weight_decay': 0.}] + + +def cosine_scheduler(base_value, + final_value, + epochs, + num_iters_per_epoch, + warmup_epochs=0, + start_warmup_value=0): + warmup_schedule = np.array([]) + warmup_iters = warmup_epochs * num_iters_per_epoch + if warmup_epochs > 0: + # linear schedule for warmup epochs + warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters) + + iters = np.arange(epochs * num_iters_per_epoch - warmup_iters) + schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters))) + schedule = np.concatenate((warmup_schedule, schedule)) + assert len(schedule) == epochs * num_iters_per_epoch + return schedule + + +def interpolate_pos_embed(model, state_dict): + if 'position_embedding' in state_dict: + pos_embed_w = state_dict['position_embedding'] + embed_dim = pos_embed_w.shape[-1] + n_patches = model.patch_embedding.n_patches + n_extra_tokens = model.position_embedding.shape[-2] - n_patches # seq_l - n_patches + orig_size = int((pos_embed_w.shape[-2] - n_extra_tokens) ** 0.5) + new_size = int(n_patches ** 0.5) + if orig_size != new_size: + extra_tokens = pos_embed_w[:, :n_extra_tokens] + pos_tokens = pos_embed_w[:, n_extra_tokens:] + pos_tokens = pos_tokens.reshape([-1, orig_size, orig_size, embed_dim]) + pos_tokens = pos_tokens.transpose([0, 3, 1, 2]) + pos_tokens = paddle.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.transpose([0, 2, 3, 1]) + pos_tokens = pos_tokens.flatten(1, 2) + new_pos_embed = paddle.concat([extra_tokens, pos_tokens], axis=1) + state_dict['position_embedding'] = new_pos_embed + + +#TODO: check correctness +class LARS(paddle.optimizer.Optimizer): + """LARS optimizer""" + def __init__(self, params, learning_rate=0., weight_decay=0., momentum=0., trust_coefficient=0.001): + super().__init__(params, learning_rate=learning_rate, weight_decay=weight_decay) + + @paddle.no_grad() + def step(self): + for g in self.param_groups: + for p in g['params']: + dp = p.grad + if dp is None: + continue + if p.ndim > 1: + dp = dp.add(p, alpha=g['weight_decay']) + param_norm = paddle.norm(p) + update_norm = paddle.norm(dp) + one = paddle.ones_like(param_norm) + q = paddle.where(param_norm > 0., + paddle.where(update_norm > 0, + (g['trust_coefficient'] * param_norm / update_norm), + one), + one) + dp = dp.mul(q) + param_state = self.state[p] + if 'mu' not in param_state: + param_state['mu'] = paddle.zeros_like(p) + mu =
param_state['mu'] + mu.mul_(g['momentum']).add_(dp) + p.add_(mu, alpha=-g['lr']) + class AverageMeter(): """ Meter for monitoring losses""" From a84c9946f6e030c3efa50517e9067cd8439cba3d Mon Sep 17 00:00:00 2001 From: xperzy Date: Fri, 11 Feb 2022 09:15:03 +0800 Subject: [PATCH 02/12] fix bug --- image_classification/MAE/main_multi_gpu_finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/image_classification/MAE/main_multi_gpu_finetune.py b/image_classification/MAE/main_multi_gpu_finetune.py index 2eab37fd..446834c4 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/image_classification/MAE/main_multi_gpu_finetune.py @@ -523,7 +523,7 @@ def main_worker(*args): write_log(local_logger, master_logger, local_message, master_message) # validation - if epoch % config.VALIDATION_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: + if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}') val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( dataloader=dataloader_val, From e98593270a818bd4a7fea4f98a5800dffc0a89e8 Mon Sep 17 00:00:00 2001 From: xperzy Date: Mon, 14 Feb 2022 11:16:28 +0800 Subject: [PATCH 03/12] Update README.md --- image_classification/README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/image_classification/README.md b/image_classification/README.md index 025a21f3..2d23bfe5 100644 --- a/image_classification/README.md +++ b/image_classification/README.md @@ -6,6 +6,7 @@ PaddlePaddle training/validation code and pretrained models for **Image Classifi This implementation is part of [PaddleViT](https://github.com/BR-IDL/PaddleViT.git) project. ## Update +* Update (2022-02-14): Add imagenet train_list.txt and val_list.txt links. * Update (2021-12-30): Add MobileViT model and multi scale sampler. * Update (2021-12-28): Add HvT model. * Update (2021-12-24): Add CvT model. @@ -78,6 +79,8 @@ cd PaddleViT/image_classification ImageNet2012 dataset is used in the following folder structure: ``` │imagenet/ +├──train_list.txt +├──val_list.txt ├──train/ │ ├── n01440764 │ │ ├── n01440764_10026.JPEG @@ -91,6 +94,10 @@ ImageNet2012 dataset is used in the following folder structure: │ │ ├── ...... │ ├── ...... ``` +- `train_list.txt`: list of relative paths and labels of training images. You can download it from: [google](https://drive.google.com/file/d/10YGzx_aO3IYjBOhInKT_gY6p0mC3beaC/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1G5xYPczfs9koDb7rM4c0lA?pwd=a4vm)(a4vm) +- `val_list.txt`: list of relative paths and labels of validation images. You can download it from: [google](https://drive.google.com/file/d/1aXHu0svock6MJSur4-FKjW0nyjiJaWHE/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1TFGda7uBZjR7g-A6YjQo-g?pwd=kdga)(kdga) + + ### Demo Example To use the model with pretrained weights, go to the specific subfolder, then download the `.pdparam` weight file and change related file paths in the following python scripts. The model config files are located in `./configs/`. 
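The `get_params_groups` and `cosine_scheduler` helpers that the first patch adds to `utils.py` are the pieces the multi-GPU scripts use to build the optimizer and the per-iteration learning-rate schedule. As a rough sketch of how they fit together (not taken from the patch: the `model`/`dataloader` objects, the AdamW settings and the hyper-parameter values below are illustrative assumptions), one possible wiring is:

```python
# Illustrative sketch only -- not part of the patch. Shows one way the new
# utils.get_params_groups / utils.cosine_scheduler helpers could drive AdamW.
import paddle
from utils import cosine_scheduler, get_params_groups

def build_optimizer_and_schedule(model, dataloader,
                                 base_lr=1.5e-4, epochs=800, warmup_epochs=40):
    # one learning-rate value per iteration: linear warmup, then cosine decay
    lr_schedule = cosine_scheduler(base_value=base_lr,
                                   final_value=0.0,
                                   epochs=epochs,
                                   num_iters_per_epoch=len(dataloader),
                                   warmup_epochs=warmup_epochs,
                                   start_warmup_value=1e-7)
    # biases and 1-D (norm) params go into the group with weight_decay=0.
    optimizer = paddle.optimizer.AdamW(learning_rate=base_lr,
                                       parameters=get_params_groups(model),
                                       beta1=0.9,
                                       beta2=0.95,
                                       weight_decay=0.05)
    return optimizer, lr_schedule

# inside the train loop, the current LR is indexed by the global step, e.g.:
#   optimizer.set_lr(lr_schedule[epoch * len(dataloader) + batch_idx])
```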
From a585ed1f62b5859c17f9178fae343dd310a89a44 Mon Sep 17 00:00:00 2001 From: xperzy Date: Mon, 14 Feb 2022 11:18:34 +0800 Subject: [PATCH 04/12] Update README_cn.md --- image_classification/README_cn.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/image_classification/README_cn.md b/image_classification/README_cn.md index 4bf06982..6711855d 100644 --- a/image_classification/README_cn.md +++ b/image_classification/README_cn.md @@ -6,6 +6,7 @@ PaddlePaddle用于图像分类的训练/评估代码和预训练模型。 此实现是 [PaddleViT](https://github.com/BR-IDL/PaddleViT.git) 项目的一部分. ## 更新 +* 更新 (2021-02-14): 添加 imagenet1k 的 train_list.txt 和 val_list.txt * 更新 (2021-12-30): 添加 MobileViT 模型和 multi scale sampler. * 更新 (2021-12-28): 添加 HvT 模型. * 更新 (2021-12-24): 添加 CvT 模型. @@ -74,9 +75,11 @@ cd PaddleViT/image_classification ## 基本用法 ### 数据准备 -ImageNet2012 数据集用于以下文件结构: +ImageNet2012 数据集使用以下的格式存储: ``` │imagenet/ +├──train_list.txt +├──val_list.txt ├──train/ │ ├── n01440764 │ │ ├── n01440764_10026.JPEG @@ -90,6 +93,9 @@ ImageNet2012 数据集用于以下文件结构: │ │ ├── ...... │ ├── ...... ``` +- `train_list.txt`: 训练集图片的路径和标签。下载链接: [google](https://drive.google.com/file/d/10YGzx_aO3IYjBOhInKT_gY6p0mC3beaC/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1G5xYPczfs9koDb7rM4c0lA?pwd=a4vm)(a4vm) +- `val_list.txt`: 验证集图片的相对路径和标签。下载链接: [google](https://drive.google.com/file/d/1aXHu0svock6MJSur4-FKjW0nyjiJaWHE/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1TFGda7uBZjR7g-A6YjQo-g?pwd=kdga)(kdga) + ### Demo 示例 如果需要使用具有预训练权重的模型,请转到特定子文件夹,然后下载 `.pdparam` 权重文件,并在以下python脚本中更改相关文件路径,模型配置文件位于 `./configs/`. From d229dde63b1093fd2a13df453f8564f875084ab7 Mon Sep 17 00:00:00 2001 From: xperzy Date: Tue, 15 Feb 2022 17:52:35 +0800 Subject: [PATCH 05/12] fix bugs in model, configs, pretrain, and finetune scripts --- image_classification/MAE/README.md | 142 +++++++++--------- image_classification/MAE/config.py | 4 +- .../vit_base_patch16_224_finetune.yaml | 6 +- ...base_patch16_224_finetune_single_node.yaml | 45 ++++++ .../vit_base_patch16_224_linearprobe.yaml | 2 +- .../vit_base_patch16_224_pretrain.yaml | 2 +- .../vit_huge_patch14_224_finetune.yaml | 4 +- .../vit_huge_patch14_224_linearprobe.yaml | 2 +- .../vit_huge_patch14_224_pretrain.yaml | 2 +- .../vit_large_patch16_224_finetune.yaml | 8 +- .../vit_large_patch16_224_pretrain.yaml | 3 +- image_classification/MAE/mae.png | Bin 0 -> 390302 bytes .../MAE/main_multi_gpu_finetune.py | 131 ++++++++-------- .../MAE/main_multi_gpu_linearprobe.py | 126 ++++++++-------- .../MAE/main_multi_gpu_pretrain.py | 16 +- image_classification/MAE/run.sh | 26 ++++ .../MAE/run_finetune_multi.sh | 7 +- .../MAE/run_pretrain_multi.sh | 4 +- .../MAE/run_pretrain_multi_resume.sh | 9 ++ image_classification/MAE/stat_define.py | 61 ++++++++ image_classification/MAE/transformer.py | 51 +++++-- 21 files changed, 407 insertions(+), 244 deletions(-) create mode 100644 image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml create mode 100644 image_classification/MAE/mae.png create mode 100644 image_classification/MAE/run.sh create mode 100644 image_classification/MAE/run_pretrain_multi_resume.sh create mode 100644 image_classification/MAE/stat_define.py diff --git a/image_classification/MAE/README.md b/image_classification/MAE/README.md index 8db9f25b..98bf486a 100644 --- a/image_classification/MAE/README.md +++ b/image_classification/MAE/README.md @@ -1,37 +1,38 @@ -# TODO: This README should be modified -# An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale, 
[arxiv](https://arxiv.org/abs/2010.11929) +# Masked Autoencoders Are Scalable Vision Learners, [arxiv](https://arxiv.org/abs/2111.06377) -PaddlePaddle training/validation code and pretrained models for **ViT**. +PaddlePaddle training/validation code and pretrained models for **MAE**. -The official TF implementation is [here](https://github.com/google-research/vision_transformer). +The official pytorch implementation is [here](https://github.com/facebookresearch/mae). This implementation is developed by [PaddleViT](https://github.com/BR-IDL/PaddleViT.git).
 <p align="center">
-<img src="./vit.png" alt="drawing"/>
-<h4 align="center">ViT Model Overview</h4>
+<img src="./mae.png" alt="drawing"/>
+<h4 align="center">MAE Model Overview</h4>
 </p>
### Update -- Update (2021-09-27): More weights are uploaded. -- Update (2021-08-11): Code is released and ported weights are uploaded. +- Update (2022-02-15): Code is refactored and ported weights are uploaded. +- Update (2021-12-13): Code is released. ## Models Zoo -| Model | Acc@1 | Acc@5 | #Params | FLOPs | Image Size | Crop_pct | Interpolation | Link | +| Finetuned Model | Acc@1 | Acc@5 | #Params | FLOPs | Image Size | Crop_pct | Interpolation | Link | |-------------------------------|-------|-------|---------|--------|------------|----------|---------------|--------------| -| vit_base_patch32_224 | 80.68 | 95.61 | 88.2M | 4.4G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1DPEhEuu9sDdcmOPukQbR7ZcHq2bxx9cr/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1ppOLj5SWlJmA-NjoLCoYIw)(ubyr) | -| vit_base_patch32_384 | 83.35 | 96.84 | 88.2M | 12.7G | 384 | 1.0 | bicubic | [google](https://drive.google.com/file/d/1nCOSwrDiFBFmTkLEThYwjL9SfyzkKoaf/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1jxnL00ocpmdiPM4fOu4lpg)(3c2f) | -| vit_base_patch16_224 | 84.58 | 97.30 | 86.4M | 17.0G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/13D9FqU4ISsGxWXURgKW9eLOBV-pYPr-L/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1ms3o2fHMQpIoVqnEHitRtA)(qv4n) | -| vit_base_patch16_384 | 85.99 | 98.00 | 86.4M | 49.8G | 384 | 1.0 | bicubic | [google](https://drive.google.com/file/d/1kWKaAgneDx0QsECxtf7EnUdUZej6vSFT/view?usp=sharing)/[baidu](https://pan.baidu.com/s/15ggLdiL98RPcz__SXorrXA)(wsum) | -| vit_large_patch16_224 | 85.81 | 97.82 | 304.1M | 59.9G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1jgwtmtp_cDWEhZE-FuWhs7lCdpqhAMft/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1HRxUJAwEiKgrWnJSjHyU0A)(1bgk) | -| vit_large_patch16_384 | 87.08 | 98.30 | 304.1M | 175.9G | 384 | 1.0 | bicubic | [google](https://drive.google.com/file/d/1zfw5mdiIm-mPxxQddBFxt0xX-IR-PF2U/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1KvxfIpMeitgXAUZGr5HV8A)(5t91) | -| vit_large_patch32_384 | 81.51 | 96.09 | 306.5M | 44.4G | 384 | 1.0 | bicubic | [google](https://drive.google.com/file/d/1Py1EX3E35jL7DComW-29Usg9788BB26j/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1W8sUs0pObOGpohP4vsT05w)(ieg3) | -| | | | | | | | | | - +| mae_finetuned_vit_base | 83.72 | 96.54 | 86.4M | 17.0G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1txV3fWnu_Jr17tCCqk9e_pFeuh7GkmvU/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1cqed6Omp8GeNVaa3-W82GA?pwd=i71u)(i71u) | +| mae_finetuned_vit_large | 85.95 | 97.57 | 304.1M | 59.9G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1dzVWxQ0_XTKqKKpA3pSSVU57rT_g8nOe/view?usp=sharing)/[baidu](https://pan.baidu.com/s/17cG1UC3gX4dAXdGDTv_BBw?pwd=v2zk)(v2zk) | +| mae_finetuned_vit_huge | 86.90 | 98.07 | 631.7M | 162.5G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1xqjdPez4uG495w3akVbHbn4YqUB1Nmmk/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1N1t-dsNZpwXSKeVOTkz3IQ?pwd=gs6c)(gs6c) | > *The results are evaluated on ImageNet2012 validation set. 
+| Pretrained Model | Link | +|-------------------------------|--------------| +| mae_pretrain_vit_base | [google](https://drive.google.com/file/d/1K7ZEaDj1D56i7uTX46hSelf0Ydbpmtie/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1kBn-ad2xyCar4xt-k_oYaA?pwd=rmsi)(rmsi) | +| mae_pretrain_vit_large | [google](https://drive.google.com/file/d/1UagT3mz_cLHcjyIQfyyLOkXtJXda3UbS/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1IcdX_rDdl9vLyI7rD1I8HQ?pwd=r77v)(r77v) | +| mae_pretrain_vit_huge | [google](https://drive.google.com/file/d/1Y1lIO_COL2vkz2YvrmYt2yI8iAiRNiPh/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1Wk_tp8De4AYNFBGnIgl5fg?pwd=mthi)(mthi) | + + + ## Notebooks We provide a few notebooks in aistudio to help you get started: @@ -41,13 +42,15 @@ We provide a few notebooks in aistudio to help you get started: ## Requirements - Python>=3.6 - yaml>=0.2.5 -- [PaddlePaddle](https://www.paddlepaddle.org.cn/documentation/docs/en/install/index_en.html)>=2.1.0 +- [PaddlePaddle](https://www.paddlepaddle.org.cn/documentation/docs/en/install/index_en.html)>=2.2.0 - [yacs](https://github.com/rbgirshick/yacs)>=0.1.8 ## Data ImageNet2012 dataset is used in the following folder structure: ``` │imagenet/ +├──train_list.txt +├──val_list.txt ├──train/ │ ├── n01440764 │ │ ├── n01440764_10026.JPEG @@ -61,6 +64,8 @@ ImageNet2012 dataset is used in the following folder structure: │ │ ├── ...... │ ├── ...... ``` +- `train_list.txt`: list of relative paths and labels of training images. You can download it from: [google](https://drive.google.com/file/d/10YGzx_aO3IYjBOhInKT_gY6p0mC3beaC/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1G5xYPczfs9koDb7rM4c0lA?pwd=a4vm)(a4vm) +- `val_list.txt`: list of relative paths and labels of validation images. You can download it from: [google](https://drive.google.com/file/d/1aXHu0svock6MJSur4-FKjW0nyjiJaWHE/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1TFGda7uBZjR7g-A6YjQo-g?pwd=kdga)(kdga) ## Usage To use the model with pretrained weights, download the `.pdparam` weight file and change related file paths in the following python scripts. The model config files are located in `./configs/`. 
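The README lists `train_list.txt` and `val_list.txt` but does not show their line format. Assuming the usual Paddle classification layout of `<relative image path> <integer label>` per line (an assumption, not stated above; `load_image_list` is only an illustration, not a function in the repo), a minimal reader would be:

```python
# Illustrative sketch only. Assumes each line of train_list.txt / val_list.txt
# looks like:  train/n01440764/n01440764_10026.JPEG 0
import os

def load_image_list(data_path, list_file='train_list.txt'):
    """Return (absolute image path, integer label) pairs from a list file."""
    samples = []
    with open(os.path.join(data_path, list_file)) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            rel_path, label = line.split()  # relative path, then label
            samples.append((os.path.join(data_path, rel_path), int(label)))
    return samples
```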
@@ -68,107 +73,98 @@ To use the model with pretrained weights, download the `.pdparam` weight file an For example, assume the downloaded weight file is stored in `./vit_base_patch16_224.pdparams`, to use the `vit_base_patch16_224` model in python: ```python from config import get_config -from transformer import build_vit as build_model +from transformer import build_transformer as build_model # config files in ./configs/ config = get_config('./configs/vit_base_patch16_224.yaml') # build model model = build_model(config) # load pretrained weights, .pdparams is NOT needed model_state_dict = paddle.load('./vit_base_patch16_224.pdparams') -model.set_dict(model_state_dict) +model.set_state_dict(model_state_dict) ``` ## Evaluation -To evaluate ViT model performance on ImageNet2012 with a single GPU, run the following script using command line: +To evaluate ViT model performance on ImageNet2012, run the following script using command line: ```shell -sh run_eval.sh +sh run_eval_multi.sh ``` or ```shell -CUDA_VISIBLE_DEVICES=0 \ -python main_single_gpu.py \ - -cfg='./configs/vit_base_patch16_224.yaml' \ +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python main_multi_gpu_finetune.py \ + -cfg='./configs/vit_base_patch16_224_finetune.yaml' \ -dataset='imagenet2012' \ - -batch_size=16 \ + -batch_size=32 \ -data_path='/dataset/imagenet' \ -eval \ - -pretrained='./vit_base_patch16_224.pdparams' + -pretrained='./mae_finetuned_vit_base' ``` -
-
-
-Run evaluation using multi-GPUs:
-
+## Finetuning
+To finetune the ViT model on ImageNet2012, run the following script using command line:
 ```shell
-sh run_eval_multi.sh
+sh run_finetune_multi.sh
 ```
 or
 ```shell
-CUDA_VISIBLE_DEVICES=0,1,2,3 \
-python main_multi_gpu.py \
-    -cfg='./configs/vit_base_patch16_224.yaml' \
+CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
+python main_multi_gpu_finetune.py \
+    -cfg='./configs/vit_base_patch16_224_finetune.yaml' \
     -dataset='imagenet2012' \
-    -batch_size=16 \
+    -batch_size=32 \
     -data_path='/dataset/imagenet' \
-    -eval \
-    -pretrained='./vit_base_patch16_224.pdparams'
+    -pretrained='./mae_pretrain_vit_base' \
+    -amp
 ```
-
-
+## Linear probing
+To finetune (linear probe) the ViT model on ImageNet2012, run the following script using command line:
-## Training
-To train the ViT model on ImageNet2012 with single GPU, run the following script using command line:
 ```shell
-sh run_train.sh
+sh run_linear_probe_multi.sh
 ```
 or
 ```shell
-CUDA_VISIBLE_DEVICES=0 \
-python main_single_gpu.py \
-    -cfg='./configs/vit_base_patch16_224.yaml' \
-    -dataset='imagenet2012' \
-    -batch_size=32 \
-    -data_path='/dataset/imagenet' \
+CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
+python main_multi_gpu_linearprobe.py \
+    -cfg='./configs/vit_base_patch16_224_linearprobe.yaml' \
+    -dataset='imagenet2012' \
+    -batch_size=32 \
+    -data_path='/dataset/imagenet' \
+    -pretrained='./mae_pretrain_vit_base' \
+    -amp
 ```
-
-
- - -Run training using multi-GPUs: - - +## Pretraining +To pretrain the ViT model on ImageNet2012, run the following script using command line: ```shell -sh run_train_multi.sh +sh run_pretrain_multi.sh ``` or ```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 \ -python main_multi_gpu.py \ - -cfg='./configs/vit_base_patch16_224.yaml' \ - -dataset='imagenet2012' \ - -batch_size=16 \ - -data_path='/dataset/imagenet' \ +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ +python main_multi_gpu_pretrain.py \ +-cfg='./configs/vit_base_patch16_224_pretrain.yaml' \ +-dataset='imagenet2012' \ +-batch_size=32 \ +-data_path='/dataset/imagenet' \ +-amp ``` -
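The finetune, linear-probe and pretrain configs all set `LINEAR_SCALED_LR: 256`, and the config comments define the scaled rate as `absolute_lr = base_lr * total_batch_size / 256`. A small worked example for the pretraining command above (the numbers are taken from that command and the default pretrain config; the exact scaling applied lives in the training scripts and should be checked there):

```python
# Worked example of the linear LR scaling rule stated in the configs
# (absolute_lr = base_lr * total_batch_size / 256); values are assumptions
# matching the launch command above.
base_lr = 1.5e-4          # TRAIN.BASE_LR in the pretrain config
batch_size_per_gpu = 32   # -batch_size in the command above
num_gpus = 8              # CUDA_VISIBLE_DEVICES=0,...,7
accum_iter = 1            # TRAIN.ACCUM_ITER

total_batch_size = batch_size_per_gpu * num_gpus * accum_iter  # 256
absolute_lr = base_lr * total_batch_size / 256                 # 1.5e-4
print(total_batch_size, absolute_lr)
```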
- - +> Note: it is recommended to train the MAE model on multi-node GPUs. ## Visualization Attention Map **(coming soon)** ## Reference ``` -@article{dosovitskiy2020image, - title={An image is worth 16x16 words: Transformers for image recognition at scale}, - author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and others}, - journal={arXiv preprint arXiv:2010.11929}, - year={2020} +@Article{MaskedAutoencoders2021, + author = {Kaiming He and Xinlei Chen and Saining Xie and Yanghao Li and Piotr Doll{\'a}r and Ross Girshick}, + journal = {arXiv:2111.06377}, + title = {Masked Autoencoders Are Scalable Vision Learners}, + year = {2021}, } ``` diff --git a/image_classification/MAE/config.py b/image_classification/MAE/config.py index c066d9d5..c3a4a787 100644 --- a/image_classification/MAE/config.py +++ b/image_classification/MAE/config.py @@ -78,7 +78,7 @@ _C.TRAIN.WARMUP_START_LR = 1e-6 # 0.0 _C.TRAIN.END_LR = 5e-4 _C.TRAIN.GRAD_CLIP = None -_C.TRAIN.ACCUM_ITER = 2 # 1 +_C.TRAIN.ACCUM_ITER = 1 _C.TRAIN.LINEAR_SCALED_LR = None _C.TRAIN.LAYER_DECAY = None # used for finetuning only @@ -118,7 +118,7 @@ # misc _C.SAVE = "./output" _C.TAG = "default" -_C.SAVE_FREQ = 1 # freq to save chpt +_C.SAVE_FREQ = 20 # freq to save chpt _C.REPORT_FREQ = 100 # freq to logging info _C.VALIDATE_FREQ = 100 # freq to do validation _C.SEED = 0 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml b/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml index eb666192..106ddd1e 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml @@ -15,11 +15,11 @@ MODEL: DEPTH: 12 NUM_HEADS: 12 TRAIN: - NUM_EPOCHS: 50 + NUM_EPOCHS: 100 # same as MAE official readme WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 - BASE_LR: 1e-3 - WARMUP_START_LR: 0.0 + BASE_LR: 5e-4 + WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 END_LR: 1e-6 ACCUM_ITER: 1 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml new file mode 100644 index 00000000..e3dbb6c7 --- /dev/null +++ b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml @@ -0,0 +1,45 @@ +DATA: + IMAGE_SIZE: 224 + CROP_PCT: 0.875 +MODEL: + TYPE: FINETUNE + NAME: vit_base_patch16_224 + DROPPATH: 0.1 + GLOBAL_POOL: True + TRANS: + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 +TRAIN: + ACCUM_ITER: 4 # set batch size to 32 + NUM_EPOCHS: 100 # same as MAE official readme + WARMUP_EPOCHS: 5 + WEIGHT_DECAY: 0.05 + BASE_LR: 5e-4 + WARMUP_START_LR: 1e-7 + LINEAR_SCALED_LR: 256 + END_LR: 1e-6 + ACCUM_ITER: 1 + OPTIMIZER: + NAME: 'AdamW' + BETAS: (0.9, 0.999) + LAYER_DECAY: 0.65 + SMOOTHING: 0.1 + RAND_AUGMENT: True + RAND_AUGMENT_LAYERS: 9 + RAND_AUGMENT_MAGNITUDE: 5 + MIXUP_ALPHA: 0.8 + MIXUP_PROB: 1.0 + MIXUP_SWITCH_PROB: 0.5 + MIXUP_MODE: 'batch' + CUTMIX_ALPHA: 1.0 + CUTMIX_MINMAX: None + RANDOM_ERASE_PROB: 0.25 + RANDOM_ERASE_MODE: 'pixel' + RANDOM_ERASE_COUNT: 1 + RANDOM_ERASE_SPLIT: False + diff --git a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml b/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml index 4a3d039d..3620ae6f 100644 --- 
a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml @@ -5,7 +5,7 @@ MODEL: TYPE: LINEARPROBE NAME: vit_base_patch16_224 DROPPATH: 0.1 - GLOBAL_POOL: False + GLOBAL_POOL: False # enable cls_token TRANS: PATCH_SIZE: 16 MLP_RATIO: 4.0 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml b/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml index e43573dc..2badb0a3 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml @@ -23,7 +23,7 @@ TRAIN: WARMUP_EPOCHS: 40 WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 - WARMUP_START_LR: 0.0 + WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 GRAD_CLIP: None ACCUM_ITER: 1 diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml index 0c15171b..0ddf9d4b 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml +++ b/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml @@ -7,7 +7,7 @@ MODEL: DROPPATH: 0.3 GLOBAL_POOL: True TRANS: - PATCH_SIZE: 16 + PATCH_SIZE: 14 MLP_RATIO: 4.0 QKV_BIAS: true ENCODER: @@ -19,7 +19,7 @@ TRAIN: WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 BASE_LR: 1e-3 - WARMUP_START_LR: 0.0 + WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 END_LR: 1e-6 ACCUM_ITER: 1 diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml index e753155f..b47763e7 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml +++ b/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml @@ -7,7 +7,7 @@ MODEL: DROPPATH: 0.1 GLOBAL_POOL: False TRANS: - PATCH_SIZE: 16 + PATCH_SIZE: 14 MLP_RATIO: 4.0 QKV_BIAS: true ENCODER: diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml index ccb6bfef..f791594d 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml +++ b/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml @@ -23,7 +23,7 @@ TRAIN: WARMUP_EPOCHS: 40 WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 - WARMUP_START_LR: 0.0 + WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 GRAD_CLIP: None ACCUM_ITER: 1 diff --git a/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml b/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml index 050ec685..e2a86bac 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml +++ b/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml @@ -4,7 +4,7 @@ DATA: MODEL: TYPE: FINETUNE NAME: vit_large_patch16_224 - DROPPATH: 0.1 + DROPPATH: 0.2 # same as MAE official readme GLOBAL_POOL: True TRANS: PATCH_SIZE: 16 @@ -18,15 +18,15 @@ TRAIN: NUM_EPOCHS: 50 WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 - BASE_LR: 1e-3 - WARMUP_START_LR: 0.0 + BASE_LR: 1e-3 # absolute_lr = base_lr * total_batch_size / 256 + WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 END_LR: 1e-6 ACCUM_ITER: 1 OPTIMIZER: NAME: 'AdamW' BETAS: (0.9, 0.999) - LAYER_DECAY: 0.65 + LAYER_DECAY: 0.75 # same as MAE official readme SMOOTHING: 0.1 RAND_AUGMENT: True RAND_AUGMENT_LAYERS: 9 diff --git a/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml b/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml 
index 15eec2a1..a90c4aa6 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml +++ b/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml @@ -10,6 +10,7 @@ MODEL: MLP_RATIO: 4.0 QKV_BIAS: true MASK_RATIO: 0.75 + NORM_PIX_LOSS: True ENCODER: EMBED_DIM: 1024 DEPTH: 24 @@ -23,7 +24,7 @@ TRAIN: WARMUP_EPOCHS: 40 WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 - WARMUP_START_LR: 0.0 + WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 GRAD_CLIP: None ACCUM_ITER: 1 diff --git a/image_classification/MAE/mae.png b/image_classification/MAE/mae.png new file mode 100644 index 0000000000000000000000000000000000000000..6ca07defcee1b64a20015b960aa044d7c10870e5 GIT binary patch literal 390302 zcmeFZcUV(t_cjU$BIt;NgI&QI1raQ$v;=h!u@DO(0zyzkq$5aA5iF>v*eEI_Dk_oy zQE5_=fR3VoM8yy(At;alfdmL4q@M?onfEu}nQuPd^T#>Yx#S{bv$OZkUh7%UTKBrw zdiI6AP7c55&eK&>Q~Skn$M5^q)U-KjYMSS!Oa^;?>#aVarlx+tZ`-!Lj@z~w?F|b# z<`?Lrrnd5$x0l!c9jjNqIegg5>&-ipRk~qO`_G@hwBHL?Q~9#8y7JZCH_iu6Z zx7RrzS#S1ubOBw(Ijh*_SV2Fwv$e~ zPrbT!>f&R^swIo5lX)B24GPfAPHn?G@q zsZ`;cCrv6}G3h0nr&6iflqwb5XWF#7l2frXb+uHyH?OGigEi3dMW2I?$M)<|GX~pJ z)HK!SscC^Nb?}0!&;N6KyZTDCN#FKssHr9TscC*c#tFQS{agjFv2(`XCtW|MrVakn z2e0V68WTrrbM8)>*w#D;K2zIrV4I^Oct7AB=Hn9_emvwP3Eh1e?9d6_;To={Hf#CV zOWkq*vH@`Y&wkDaPafQ}+sZp6XwBiHAxC`HL89BvhD zyX4yuR$zN2N zvt4rhM>=+B?=`}B$S`|F#6!@r9K1T-7_#BA*vbF)9M z4NkQg+iSJgFWM*2^>@D@pfm6t(6wtVHrsqV;eYw)udn=%QxE=i>N@jv>;Ci9|M=+d zry|0A!nTD3fiFD?{cFI!pZuR6em~L1Y)tn5(8ai-zwHH@hU(gw{W)q--JQ9yzk-2W z;P*S+8N35C8~f4N2L7xZe+S!>Of0enb<)+;?9?28-{Kst-p8s_UB zO>s>QPRDEO-)21FTg;zvUdvx+ZNjgnWwUdSZ#*Bh^y&yot3=rev>bdET z%Q5{?{3`q6`t7IG)HO7x%rLV1r;Wbf+wIJ0Gm)3||M{42r^l}_N*8bWmxI(btbdzP zCzye3_}_jhxS*Z6VEsSd)WrQpt~5$#FV26YGqK|zBst~P=KD_q{&{NOULfeULwjca zn+#PI^ncUZjL-K@d0`Wu-(K-=G9<(h{$&Ja7(LmhUL~J?!eY*4 z?`m!bpo$uVarJNJ6yHB(3a6ywv4Mu>2)HPXAiR$pM=F)(;5%PtAhVYIrRZF8DQeAxxB(zF$lK?C{x5*` z?UOmTL6UKNy}ym-ltUn8Krsd;2*@X6=sp-zpcg7vgO^Aa?C;V6*7==@Y65 z+ylnQV&^a4^MOA{YTHbZ;RJ2JoTwI00=Ge$=Q%;Nn=dbLqH?d6SZ z9=I)V2rc!E`uD5F=jniqWqZ!zzvn*3!Ikg5dptP9C=_95jzhc*_{2&YDrj{_?gV0^HWvAnxi^n~=Q`Sh;)F zrx|~f-FySM^7~_-qNmJwZVdEG{9^I_2(J>LN0!t-^KXXo{3Y0LZ4*=GbgHrlYzACd zFd^#aF&hfFIq&b8;yECoVe_SVdg>ZTpkJg##RTo%=K>7H9hEm>1{3l`9Rwqn2!S(o zW;_Rn;7qSgn)t{a*igvj{oDR7`+FeZwmbVe7TdKGfqseSm=lJ=8l$awB%qXuYkb>i zpFVX8M=;RY1MGo0Hi*l^CPel6IZ1v_k_l%v!|3NE`8i2`Mv|YAB>rb4`TsB6`WrB^ zJ5|X8=!UtcZ}9}Y`70p&&prQuNG8C)n>2>F`pNn)K0V%7?D@ReBw1XOG-Vjyt~8N(uV5i6DJiE z7VSVCODeqO&3jBJ@z~hX+}}d%AhF&)sO`9G4ZR)N5W5>vdZgH@*!>fufvn6p^vr$r znzF92#&^ere4FHHtF#+sF`dt|3J2R1aje7h@~b;To9j`RbKQd7=O4&d4JIp(P-@qY z8(iqFF*{3$as_twJFv5*b!#SQ0FXw;7;1jE(=`3j`8phv-M9nhwCO!2b~H3gV?cR* zwqQ=3VX>F;$ou?813&^DAzp%Xg-GMxb2f5dz4cXtFiqQH0)EZ^p^{qrw7v)x4b8X`&6=aTW94^V5Zh zV8i;7Ni8s);sRXE)7>f3iaTbrS%J_H139eZ2(?UD6KRo7JI=&E&=d0)7I|TB8&S3j zFRk!p$apI$2D}no3DcP4E@SWyldi+8cA68O(6&nb$p||6w4@MeI@0fAuA)R1 z5vM9NbE~AoxelJ~fI+#PrX4qDo5i83lt-J*F5^BKb4~6FaMACv=dT`D`k1Q}C6TQ8 znp37By{0X0530kr4{rRFc;ek!Gz0@XH|T1it8oW-piTS2r%$BM>w&XB7F_>57k1IW zDahD@;$l+omfyhxHi)~dgSZ^G3pC!po0k@;WsG&El?Ha|CGVB@j< z1JUz)Uq0u2a}6&?bLVNr?*UZEoyV4m6RzPF5On<}-|ykgB%U@Bz>a;jn__r*n6ydWzdJf~PwH|;r(&A2F`5-HdX zp%EnIr|6bs4UXvLDfvnxX1<};O$S^5-6+aYL=BU|Yq9N{SuaKi70*Tp@=gb&*%%qM z^w&AtrbN#_uqwLZ7t!HE;nUcT?Bv5}m&*y2D}X=P^C@H7_sISkAO+g3I|bu#C_beU zr7*#mqtqjVA`*w2u~>6LS8vPH{`Z~jh_W^T=_u9s^#K#M84o`xV3o{qQAz`s9l3wq zfXNqhrUy*(aVYi0VE!HMIYCkq(Txzr9tD6QF$+~(O=Zd$ZXN!kOp-*5R@N9 z^|b0R-x8L&2z5>vBySS;z;PdAy9~iFEgv5y=l^1uhUmjU#c_Ehd#UuU1GlKebb2`A zC6mp|OC^O=$-68SaSEz(5g!wQW*>A(7EFiea7_wP2%^}aH>HPbcsCXvgYkZG@=F{ zKV9VHpCj+E*>8b@?S6BDfE(%YfmImUdD&=qPf-u?-l&yaau0eb;K~>49@*MUbmG1l 
z$h`?`2N~jR{(3q(a0KU}&@plp^C!=S#^RY)%iPq^ZuWN+2}Dy+7?DCK`?G2`)3fDE z;vIaJFESFBFP3jGXd*Y*|2}No@URi?KiW}+hGmMF`?}?iAF(ppXsUc6vaT4^M0cxO zb&*pmI5HKZVN`jW9ux4F2XjE@%%pzAY5+%e%;9Sp+MrSBknH3PYRnXIxwZ;sAR9c- zHDos%1L5J2?iaRxF-4tS7?Cda+?c#_;+3I$%NiBTR)-nje~_=Wa(P|re$uvv3P6Pg zbW>ro=OKHo5}(v#lmMBi-TzRsk|q2KZ1{}qkcnrHk9s$CLMOZK1PMB<5wZMw{B=Mg zHxXLN7V9p8&d$d;Byqgu@dxbA@Ty7?RGhff?{P{q-q;tSk;}iMOKE89zN^1ihQ66M zsP0DU&VZi!_E2w_Qb-|)0jj^lr(y*Cx{S5^$eC^UP&|r`kM2OW#z(Qsr+|II&@$=d z-^M8C$+X*kbMsPA_jO7LCawHmi?;q^F}o5%QOAu3VrUKa;3Lmp(|PV}c;EYO^W}uc z7^q**l26x6*JCgzJ&WKsDG~)KM_CS2WLL4!u3Aqq?{JmR3e7C`HIv}Hn9XdsO>+Ad zgGSuL8*{b3|FKB-JbX1y58i?0Ux_ZQ6QHG#F)4#8%@wNlvPM2YfNUL;4UDVrk7s-@ z#u-FO_zBQwqyPLU-7sGNu0OlcMTq%&)v9qAkt{F|7EPKKpJ;(tn>Tzsklr?{g7_|r zlF@4>DHs{p&3)Ze&pX}s^lwx0IuOv~r2!~!zMMn&9AY2=IXFp_p}B5gP1k>Mj1oE| z3JguBk7zf3?!z~`)vlP8fY^!#v?FBbqJ$@D89qK$$llCiImV)=9R67A<(DXoD`^%| z3KOUC{)fQ5qoFuaL?wwGcyqA+UMv%zMgn>A3V0&89#sy*Sj90L3lFjMw z9Md_++yG{8J+7-bu7HnIBO7no|4xQxfAno6H#@+Rm{|sR6$$^~F>og=|zD<_{ z}ctt1_&$-J9~@yoW_PB85gti3jP38c+C@mY&>0AsPMg>^hE zU$YD%y-`@Uu0xR?KuxzC{+6DECQ`4HR!-?&+FjflJtO|EU(hMvbnm?AXBwnc-u#FW z+`=!)kwV?}s|}Usj$lf}u0)w~tqij6!7h8#^|r}E^QxZa@h8-?)t=4DUbKtCv6a8a znsr!xUkmGe`@5m-m=57IXr}h7y=y|4!c_q=ckJTv?4jqHYJO~4^Yy2iC1elalwM4W zL3;aYz0jFQ#Q#Z|HmXQr3_}!2_bsgxwgIM74x4=MXYYeECd_X@ za;wOjeVlSeJ?0y0EF*p5i~%Ur;)!xZ_o@w*E4wpG4u94$JrKB=E=B;Pq8YL>k-|?v zvxgPX*aI}S{b;KEaA~7xUZ=J~*;L!}7a7y~MAL6ai{U#p$v#A4Hf?s5EP8?K;>#{Z zU3^0C$ehFbwEHx06$>P}4y-Zjqpmegn$DfNC#}w+d0I-oBFQQ^&_tLKbslvQ7iQk- zO4Y=XYTWd%68x(p{crPXpwP^iLD;$cS|Ik3wvK(ahR=fW{RInSZc#w{$4AO74Oiy; zXO`LLZ=E57pHNYOP0xw5A>pZC3R0tZMKM^jKAexVNx2*bi0( z|ALaIK|Hxyt@|3*iZySr$9c7B#fY)|N9uc~F#~ZqQSgGJ4o`p`fQ$MWFwQMfWiHSl zyt@kH_aE|5quxE!PPF=Z3!}REk=o)>)zmrVJ4zE$1jd=6oWmv$4Pv$tt52Qi^sHIs z;N(xn>Dn?50O!*5K*E9ufRd{X4=U_Z<^0>ch{)cK9cvwVm#(*LLM%(Ut>7cvDk3Vbgk^&HW4&?(9h#IZd>B2c>@bt(CRE7=MzWu|t$1{HiS?*{hpJ7p z%85yP@TXY8doYxJs|%MwK%_U47pWxNqtc2S<$GBD?b7Y9$r<{+{*EjTz<-b^Bzz@F z0t3i-dsI5&{yuJTLLJ%ne_^GtgYMS^*|y(C8y8?S_lY{c4FV=XCug9R=p|0GiP>c_h%(+#EMFOGe)BbN zBo}**Pk8=#=k-;&gu}v;NNH78McDw*2l2pVUERsZF*{Zb=i+8DJ)?u*;zz%lBMIP5 zN%?%1Zq5STynr->kh_T}kNme{mUnWJwT*3F{xfD8tS|UqB7n zHjfF%5qtYtb84CCK8Iww2vs~#Fbq4Bz-qGwNI^aONh}tg+0Tbc%n0y=_JQ}>p};60 za5locp~g268edRYp=~yt z}*q-sI5O2%Rvu@JbCsulKu^(g7=EVE zAw8N23f=+?vU7RjZUWmV^3&Z}O58cukU&lolH|P4Zzc(%>09dcN9$^>iD@WT7Gl0e zohptd0uk|@`?ZT%MEL!ro26V*wg^XT$ocnfH}Z|3b25GWxgtZJ#vLnzr3VEJIG)gGPlzeyR9YHBtxv3 zGDGjYh+Am`y!-C*dEy;9X7pKtj^}EKJpvNKGj%DpwZ2vu(jkw-{TBNX&Z_=qX!Hs# z?rWdORlw&9>eEFV65^i+s9^gO(8(gzyF;nw(NRsLw=*G%x%)fZ0k|kd&JY2q4-Vs zFL)rvmhm%bhZnEH?&=b5k#?ON9_9xaNYrl}oJXrtNsw8?I2KG&-ENfU3ZC5&hi zN(2*|YRQzset!OiGbxyUQ~~nO;T!IGT*bqM>}yx{$7CqVO^3h|Cju#naaPWEqH0~$ z<<}2VkV-kQb0N+Q1E3N9%a5tr5fmn^MAUky45_1*QliZ{x6P$jMN)1x{!aO?dzd~$ zCv@@%dvrNUQYNOy=da$kCu{`ez+lZF7=R7~EnuAnCCwzs#c;(fBCbkX&r(}#uHFn!?;=l4H85tTJ2k{j?uJg?kC#erMy;D=dE82e?+w? 
z;tCy9%xxq0zWnskE&1SE2dO44wWhqs2G^O$gtJ&OCS_o-XD&dNU0I8<9+nDIg+8)l zzE|wIV7c||&S(c_xaq~JDY|DP+@#xofpw~>(@U3W9*%6;Yf0__?p6-4+#*&vIKSj} z-xzkV9F1*HopjKtLpiq@d&L9OAJhh&&;iP?!m@@Hv(SuI&&lUjBq#`JOlc?G#Sl>W zl2+@xvizxCS)ui(^sOsJS#4xKE~C_B6|NL?g0dev(cM2;Wm>Y=Ga+dcQt4Q?i7uYO zn(=gK*-vnhDkgLeBODxY*Z=8dNkWneVl}i1dKVYvelK_kHe>oZ5NA`!c@rx9c_8SM zpyKmj{gcpjy(>ga1Bi!M1py%iOw?)wvp${7g#pq%vv=!3=Jx+%XkF z(rijOEqFZ?Y^uRox;y3YR~I4q_s#$3R5Mm|@Pz;h_5<%bR5%m@81?WC&tQsw?Jmz7 z_2W1yUxuHIYYrJ0ZHo+F(PUV;my%rEWiUIBd%Pm+R`@7(fEe_SJ>cv58UBO|v}K51 zh}pYbpRab#k)xH*6ZF>#{jbtT%ktshol;UJzgy)P-OSHh^N8+~Od#SMG7BaeovoWy zNZ!$!X&&Xm+_W^;AwhE(5PPj@us!=G*iC;?nW+7=UPw3W!kCk(av(6P$cMjvZJ3OP&XN{eTL}`&Bj)N_jCTF;AvNRoxGBfB#W|lR z<;MvjHm5{9Yu`-=Yr8@R>q_q4Cs#EpMU|8a65Le|pYZ+#p5BaI-=A4vaO+r5Jig zooS>^r*9jURAK5w!!!}_-874vTAJ-dohe=a|8V)vF$1d?-6I$e_g06Aen;L9Lq^D6ym z3SYs1tME>Kr4Y6|gP-sw^llnvA0E833o7%B8~3E}KJeGyW@TZ@dwRgk4Pyn<8F-wD z)wXk_yR3q1D}$-2FAX)deKy1aRSYK2w>;3N;1ijfC0}2F^QIHP`O*sDaVZY)v7=r( zohm3W^0}!082yW!~7OB67;VS{uE69rQ1scx9Vysvujuwfw zJruQCF%sx3em@?XwWEVzG!`5|7>L^=@ILp@JK(cvI!J%-0gdTc%W@L>tv}SLR8fnI z(^-Q>C5gI0=PwPM{x9AGCKLFTB<~4vY&*xJ5erXBlpm~VPKW)$rT!(s9|+!_CqOZZ z=HDNjR@FKPxzaw6Ciec-HD$f!vDRGbq_ARYaL3XEZYi~_e8cWI?6#ZKS&yJczEE56 zA=ZUKELf9Cg=oRhSpmtX6j{lfkBX5;+D>) zAoTcpF9Oqhy#J2dr(Yh0xUdh(fE9qpj(NC=`6gLQLe|J#9SAQ3`YE$ zFb`4Yx>S|28(Y-NYA-kj4HfP>m}F zOpK1Pk4d4KVDf*6Pe5yoH;ruuRGQ{~>fhYEn~y%vrF0O%hIE_+>kDE7M&+Wu5xC47 zZHh-oWGp&fiI)$@KLtORW-ifT!rqAgoS^jXuV?QAE%a|F*(=X?1h5v?^e@-I4o}>B zNcTz-vT{Z~i%4h5q~@(&hTFHYE0QO~rUmW&Oe%L-T&l;M&iLjq=}ZVPeU%l^%Wo(( zA@qkPo&E;zrd&zrr^|o}xNLpia6Z^|!Jbr}XVJ;oW!bCExgK5-vYC@qSSp#JsKt1cGYj%sl3??qe8r%#}uxrk3ZMWQ~lpv zQmIPa{L#ItPI4TGf2tk;o%5@8x%2h#1G6o(0Vmu`k1&Y#S&u#9%c>($>Ej8~WY8CaLiSJ{x>{-lEuLozlJZjFaS%Y3w|y z)%~DPg4^Konb=4$ro3%`ZEtSuakNaG8hXt3FNACSpVAr>a+ghMQbg5?o#moS7#JyZ zF!v{1R07A5Q0VoO2tbs3vL=L*51B*|$J`|V6Mjr#d2a3X8u1X35<(Aux+)+x;Zyt; zPEkIlq)h6S3cZvyc(VBM$g{gY$M#?tiT$L}ehX%~{bltZ7}k@X!yylR;#COcc1TJd z_>D14R#ha`132g;yu+ZcD9eekE}+*^)b2JFFp4@kv<<%Mzz~$GjZb6y0N!3R?9ZN;*~iuuWIK5=h0>V16=T+DMpNQ9$%64BP254dBp(WgS2A^m!}zyJ zcc4ma5jPigQ+MSmN90=*FWJG38>4)a|MF@`d|`)$yhZi$CxCUNF_s&bC zl0O=dreA$@D~cpo(pzt_7yuD3k|h&Hb8Cx28=`sSAZ4MZ%+RK2jDuksyO?>69lxIh#6Of z8nf285jyO1g)kp@NZIzfn^8OFg0JmI77uOmKh@-sA1$>Y#fI`L#mYsU;{*+m4$mM& z2dXzQH0?~ng-_-8>)LVG#9=p#?#WYLhW}=Tvvk@8MSsLi1j|Q!?AO(Ejao2wm~nOB zJOkzcREoo^XEE0~9L*vxfgf$8#C4XKHH65mU9-_a$GmLmBGzT-)Yuo9n={cc1Vakr z>pQlszZRk`%*sNa|Ji1Tb>Y4Y3huj9{GKH=(AhUf(2`2%M)M#RV||Fxshx@IXV^03JZox}^AAiF7F0U)AIlIHoy(UV%e#6mWnLu;&{mXK&5EzBs_ZPF6 z%@xzWENF_Lnk3Bq=#vQi74jSS-M9&vC>v&F5?u_Q@D4PHn8Yr-x*_7t7yiDK-BkCKCQj!)mm6 zKS|>v-zV%~TQiYi7J;fg%0=|w-rn-28sDsMuwq`bOne=f9|Qi?HvlyCehTd~PuDv~Jtw-&>}X(III zY4&CCh!PVvIuWhY84vw6!VQrct33VB5@i@qt7}T4bv^X1m0BD`a=_Vaf#zV#$2qtX zyjIU6s%geymh)>7I3Wewwn}S1id5bnniX@2*OTSg+wt?zNzTBmqy`&t68BpxonQ4y z^Fg_5H$Brfspk@xg|4?5eY)o0RIZRqS3z->K>ZSo!RhY?hG zK%#xnB>6V#2LskzXSW)MXV(xA38KCj22l>ffvEJtic}j1@q<0~#eIu8iZQ=k$Hs5R z!$I2uq*=n043!g1&j-SdY81pX4v(F~*4W1?cj;xC@ni?C-`!@;9ceV3Nmt-K0DVE~ z_WNxp9%eI@z*;kHehI7hxA_L?rv6D-WN1X$r^&B_N(V1eIQveYWPkyS&^I+X#hmKE8qx7guLm6su9yX7=G(V z!I3dgeNm*t>%AD+br%Z*m%X}XSJ{!dS=~015KWKRv4EM(zPSDuX2AFP9=}OH#>qd< zvLh&tUgmbQoF5i?!oykXZO?(;MWr?GycH;gaSfp|uME*QaRF(&I=9RZn+uqMqvw%j z-@F}A8@bMng;Wz!Gws#|GmLRI5gQ!?ITqjGu-r2Jk`bSt9-=bBPoeQruqZy>_Axq> z94#T4OQ7J_eDNlZ*J4&4ilyyw~bg6R>(w7qrQ#?)QlMoDaBpm;+fkajHS<_oCyVzM^;PQaGAU4m5m z{OEU?)X&c$a2MprD-(*%^*4-ng$4>^L|^0oi{f8$9wwZ$HM-FSrjYrQg*jnvR_g}4 z*^%;ws^YhgOY7e#MhpTaoX}Zjp-zmujbA*w~?(Wx8)$Ov?;eoptZM7 z<+R((q{CAjm*9&g4Tz;gz4;nJ4;a2C52oSg>4LTV|6&JlEAB)>scE-H^rViR-oJVS 
z>Jv_mfN@V2)-|8r9JI3U752h;7+2ciCMNYz`L~*gg9EcH@)P6{n&l;YVp?&}2l!zb zhmZgwUy3y&$+X>s;chE&D`xUUttE#BBV?qJ;@%8ZRr8`KPH zyz7z#KnoRqj(^2AI+UA|OGINt_c#LJ4Q55{q_v%zW)4?TF2N9h|8=|=%xr%U3Sz1z zk586&4Dd`Xb$oro_4GYxJ!;SeIAFRc+TufIYoS0aOwHw{rK*N#%)X+$9A$&HFwvsP zW#PC-WYG8d)m82x64f>-wMiRWp|b!2L1-*2U+COSP47E=_81!TII!?(*9N(eeSjM2 zxC)HddDay(AgFUZv-3kv#wjiHMYr7Y zWrZv!X(yGDSnbjPJ_pfhNC|x3hNBEm~OjRsmN! zi!O(HEWh^6l!g$$Z3YTtfWv6~To0Z-i87rU$!p~cli5@v4mp( zO5N?(N&Xr0%Nmwb3}1p~dFNQx20h3$HfuNORyb0l2jIWr^x38hWry5EV1q;iC*bYG z^y^U9414rcDZe&`Ie*$vK@cdyTK9VQ2jXlfA61S05yAR|zVKDol2B#ZsmN--%@>gN zj*L-;qzRp@#*O_#U0u|A|S8u9D;e30?ZZUInMR&$Q!WK-hbxS84?y@zou*oP7_S;s4QG;s_Pp#%ZVp3pPgHHT z2btCxQHSs{9#V6;t^Tai*^%_#!}IBd{{OjJo{9-&b8UY$uOP48<24@-)c7V91eZS% zR~}adR00HxavAS9S%H?UCV#{Poi+dJXAaeh_3BmF*sCC~UYMDcRLUx8?tQfh%v6tM z@xOfrMO)CiyXd~Zir_op&trV(4@q)dGcf}4q$7LMQ;yS`l12Z*AD0?;tLZvw=(DwH zHX}BX7Wwx3dTM7oJS|g?ZgDO(A|(`aXk|d$kI@)HSaK;mqS27#SSn3QL0kjkQvN|& zNK^um@!@tchiM6aeYcNK+G(=8fo0cubFio4UXUuh9PcSf;TE`e-4QOMCs_Sya1`0R zF_|wB0Anl*M#6+2!^0G<=Kjm^@X<{vnI#3y+@d*EHip4`>y7{g*Nk~Nr1_`@WY#PYllhO8#SkT@Y*YW$pzlya|0Te8*H?R; zAFH}Y2WbW0VktB}Wl@@^(?Q>L{b6Cm6eO5IR3|Al!2BX^8I38InWRN@le$&B-eluV z^>o{+-f_}t{)y_t%dVAM^FDu3cP)dnp!;SaK50ZJjLsw@@YqhVzS&P*&bU5a;_(u( zG`fANw*&(cW7bk9<$i;B1#K&NU$?=mvKDOSZ;=Ic4MB1V`TowHM zfIvr;gA`%Mi1^*s3(yq$bTXnp>lWicPveRqiS z!;C*fmgA^4Q@2tV!}%96+e5*bV&b@=K5mNQ1kG=2gr~oyJ$zgtOxE8;WTP_X5RjBT zq8Poz3qM4i7G1GtJ}O-oXkDjREIP9E%w?NH$!O56|P!v55TD01_McgVx}1JcNY8VPdo zdY;pLA>HVBpGQJQLhi;#91+f3LqQuKFL?wGdLE^O&L=Kiu`j^mOtGO_Zlym4hG8T_>_op%%!#jNkc<8_t9) zSlG+*v=OWY@mtfi;OevlC8mYkxD@EgLc=W6Y2;5TAs=GTCC>0jP&%CHBL2GN| z(yJ>59T+5SPlX$XLGqc$z+*4~+O(6Ah>_;0{D2At?dYdr9P-f~E!EI6TPxgqAa;`l zW?dGQ*(%lu1cVCaNg<&*PbV|2y_M}37{nu7oHN$m$&F_c_1aXx zQR6_KQysu>8P~8>C6>XSQ9OBAB}?q-p+B&?OhsSlw1P!Kdk>_(N}vlUQBKACZIwAEi5n9eI}Dupi@Q$;N=Us8un4HjOd6@U z#>@L^)KK#A_#Rn!)OU>Zv5e#Yn4W2YK(>9-EY}03s20Hy2bAyFGXrsKhR?~`q;<~jH#k?bK1`gm9BEjK&DXx-NDuy{wh z->W-G`uLk^TS){Ww4dV0f%?K_k64VIfT>Tp?Ve)wBgsgcX(kyzzaCQjZpj=w?K3mm zQr-2g?YOTnq+-a%#~e`%u{5O9P}q^vPV!%#oWdG;$VEhD*$d$c#AG@2?qr;iGRB~J zxEc*_&1h7hQb^!Y1XCu`yT`QF=OKQtgV$3$p9dNmpQBr2pt)1hKj|zsb6k|NjeirG zBvd#kh`R1*HeTqcAIP*=&?NA!v^C%7Qy3BBWS?i9G%5!xZ%c^y^t~hUEU3&Ue zk@s-YDwO`YUR7RGL@G(uAl4~L#YrlJ4pVCd#H{hY7DHF*iJFexYA+z2zR z2fPtyeQVb(lib+0szt?qu9oQ0w+ADWyn*994Pq26c6pRQ=d?go#CHWRh@|3R_db~S zmUGZ!q&cEjQ1h;y{9Xfph2HRH94`;oQuD&u@@ zCW4ggd=D#+d!$u;f2hdsLusU@KvQJ+CT;$VG#2m^A~bo6)3_%l0g5$YQHKdAqRphV zcL9i?ABxpxL{!`o-^kK82?IXI`riHF6n7(Ml5Y#q${8{S(sH%)R~tNH zz^mu3(lW5ti(p__2U3$?omP8ueBU`4Y~0qFo+>$(&d6cgC14-hWE#|`-YqIKXezCi zK`h>If0-TA2K}FF|Kn-o`yPHVr>7cnSXfR}`!5Z!w`>E$6N87UD9zy?+{)eZ*{yS( z?The5nMeuVyGm|={s$Qq?A6Yxgsd7q0oD)@=^DPrOm-T@V8qGz3rk2+`_nZaR1L-e z`#v4)5x_vYpE|033Z=OL@2K7;}(-{vS0MRfiP{g)hq3P3cTz z_#RuvRpC*YW+Y~|eeSIGzDGfT(UrUF!+66-#bB8F@sXgBQw(w;C3rfT%p-qBn8bzi zX7ElS^e)oplfVXFo7zcuu5f2;oOw|j6)RSvf$Nv%>tb7I4#~Au)6aJ4uNMgz=dBYl z8vMtH5}R84sMZcXGPY3GLa(PKxs|!&0h8w*_A|ineX5b7RK!YRo6m_*9;H-d?%laz z^us(>P36!$J zC;SK>e`j*hfB$!xW~Q7A);X$+wan&Y%)WYJc*(V&0= z7@H5RKkVsV2o!+97pwmUUo^B|DeRRwdWg-pYb)%KCc!H8QtapwPbN#8@k+b09Dcex z*-`YWeu6Eog(>kLUqu_~UGBt4+xM{mIp?q8C9xa(rHdsxYDUcuR`(Cx0ImKlgmk&o zqmUL-6ic8pS*44jGx5cl0v8{CVJS|J;iNv18Hggy>Z=5o>R0p_+u55}kaN;D| zldMX_d>!|iG65c8MT}*`!0hfw#s{~OB9ke5e7DBJYHk&ls~Qm(p4Qdt_UclpCz$xT z5If?rfcUTM@(IP`Dd2z9WPDF*S9^aE-Z}xoNeaVkYO7{Aan-IVCmnM!+-3hbsg+w% zIGp_s({~rSvbJBYs3}6zCwzJ^*=sy6#hm}F`i+!8w2bgN(y(d_F}jtkQ;h&?Nd@NO zb7w5hg%Ljc%YYQWXrQr}LY$j~2kNA>G!m301VqDZT`82%am|b+-vWb$4_6wJSa@Bl7UzuO%RgU^yDB@Z z+mE=TMc#VOlpv6*F?@+ruPIY6|Hxip1)7Xn#`Mk+tyzkietg<%duV5I#2)rYtaI=N 
zjxPIclt_t>`f_>SmH1{ppD`NA+$?+h+LT+#yd8`eRkNx_I+M_sINw?Ayns$uC$+Di z2Nz~I(8D(CR=8KvB;J>TjMNNvpzG_ZDT#-LGKKNx2&#D`3uAN)3#Xi2n4LVEf z|Nr6Pp~LZ6wfRVEC4uHZX(IA&gT)a>2+blJjZ5;?(Q+G_7G~7^+xE1PE z3>gUv*TJT5w@J8IikOk8Rvflz-g$nQTv_q_{4_a>QSk7IF>Eg7#rYfcQb7dT!Gsvg zQVSF6g9$zP0TWb4Jvhu%P#*G=DjnE$>8ETYLy9gAR;1b=)~c5-l<2Qh_?`M!w+L*5|7|yozrUg2eOfd)r$170(-5LF*PzFumR{rO*E%69MmY zaW`^dlI)`qvPTk4Fsa0{yXpcBFUiZA4ubs$u$P+N{4moxmL@!N=mD=t=yId}WayiY zaYq(F7nl~w|0I_<7CIRu0YGTQevYYfEB}ObVLqbC%-*sH1?g_m+~{mQN!@WYUaosf z5&YZ?Uft1bi!kUb_D)Fh`WR*7A8Q5^&qA?jt4k%3+16~Dlt&g7uV+S)kS>&Tji$_lRZWhQ-W z5PFEcG-bW(bnPTBoAg<3vOY0v3N%`beQ$1Wh_6(`)= zc4#2#UKGl?i$YDCyQVfXpNmwt#V;|x!S(J3**P-my=nhRV($~mSC(}{2A5{_cnBc6 z?fMsM0ju|&%igfjok{bKQCl=YN{7FG=RkE)sHA*lr*dp%Wn~Wbz*rtJ%{4&Nbyocms7%5cUzaB9p;L9^sA^B609W47@AA6Qf3 z^8@i7qc6y_`4oHs_1|>#7O{<1$LBItODCiJ;VdPM5FJ7m$SvOLC3Mg{YF;;Yupi(L z?)Ar2mF>FNZIzlt0kX;I<~$qKat{ivxcphf=Az1dxjvAB?mX{V>8eW|`B;z+q5DV~ zAtAKu1kY!%d`zc2X%J1pmX#c$%mpf_xCu(b#5<7ET*_ICzC>i|(w$Zzq@pn-wmhx& zZLU3YR^KM#ikH7`@@Q7CXP3RD6EN_zGkzK@na>mRu3`CB6PL~$cnQ%94tpKe%^BR( zm{|FNz$zIMMZ_dd-f-!-fxHH7monpa$a~kCkl(=`rmFY=l5yH!UQ}UmVU5A`PZyW0 zjg_qFWmY`b;HWK_%fYdWX-cthbZ)jMHl~L1?#=zI+rTp~FOSR7`4h{z=e+9im7ibv zdy-bufXa}%Ky>C`$w5{J^~!_e%di1&fAld7;GhRb-=X~__0agmxnShPe;aTQyD<1q zNle=(;K^w?5M0?=UC?~JoxU20?1TZsP~TDk10e_cFbKy@o7J$}hc-s#^=fP6*X9t= z-YW>Q>__&#F$A7BymX9qynwCf4JD1`g!OnwjhQN@;NVd>{DqSj16YA!DJ0(}b=!(B zA`c7L0slu90A{K76+kA$rW@re?GEXONapFWN?58Abr~n?tIB+8eMYSBAN=-`e}5eX zki*;EauX7lzB|%G0*% zT%K0G3x;7~Ek)yjr?}I>iO!>}gu})JUBd9Ay<6@JZa~h>#89jvCNcT2Wl9_NGGd|p zn6yD*YQou6kQT;%pPb**m~&2+J5Zv)K%uedAbrBQ|5yAay3Xj!hSrPLeZTLsGr(&z zX>oP9PQdKBYGW^CagmkO*2-Fbi4tRPT`V!Lgw)Nxd2>B%((g4=cvAn{b{gBqyE~Iu z&xhvAY7GsdhM%Z$I1cp$4yOpH$#e08twbu=(>d8XtmNF37=(;jdBL@NO$eSVqq#AO>U-*0nY z(QDW)&S=SVsM8g*6@NuVlib;f`zJL0kIZ2d>)2q;uYw-qZ;FRKtDiINzxjkd#hkQ) zZ>j4y;Qg@ve4z5f?t>tsAq!!>A;;o~iQ3gH*(tx9X+62odq~@FHdqq10Rn**laNoUsj9zv2IKMz5pyP`=M%hU;xJ(xeZz88E_r90j9dYk=) zJ#xX)kIM8Dip8FD;WKwXUfdO9`#mX?^%=_g^$4o|{mwAuopDT1HvgL}@| zTM)bYP|WTt-$IP)iv9@xO1zHXtX_83^3RHK8%pnsj8@$R7);`rYM0&AxO%CsA9+qy;I#pQk zztKs4xn!O<;F3O;rC|)Ewg{KW2@){Rd{Rr>F^RtSzu6ZljEU2^xm&o!WHzRNRfI_o2x+!4HK%A-~MjN?IX1!nvB zEX-6k~C)$1RhQael-8~8Dw~v#K zJc(|Q?a_|5iFA`C&%Cjv^(*0nl909>zNwrOsqPV{qqoa0Q6vdp`0&H)?`KGpmxtKG zkD-^hSyIq$Ora- z#2U({G&k~QjgS5QaPmly%c1?&-}MT8%(x^s{_jtz6nl=CbV98UzwNoVw!HQrn1JE^ z>}>}hVymNw7Fih{eD=2d20a3P6jv^APWb{iSdA3Y?&$h@CS`M{d#$wNIJ|=&qKP{C zrb-rWh0qIY8>qJ~n5G^z94SG{5&%GEMk4X#M^{iN$Vo4{EJA8eZaW`&y(bq*q-eAv zlui>G9|un-#8R*NMarGV$LXENqx9+7xq7^sgFUeiWc%(j9ThzmsPxVK6IP#8@~_$U zXDw7V(#v6?-uNd96ZOai1zk1R8w5GpBZ>_d-|x4R>L?N(21?yIsE?8gW|bIv!<} zTc>pBl6E|U)ky5AjwR=I;KE<*D*zV+1E#%@4xr-#m*H)mdd@W8@0M{g`$#;# zlNgiSoCbU_{&iQY(p1K1piM*RJ{S?UeRDb}VXAuF&%pRZ&Go9}J&B6cP##VfT_JTa@K0@7wZ9e!F4j>O_)gA%q1Jftp1 z$0PAW;kI)W_$>ubxI^5~(YWFB_4fSB4xkS!FUhsQ168f-;Oir$8=rlWj4!7I>{#dw z@CwZ))c&-(&1!B1$DHPRQb+>XS?uUzm8&1;EbE}AZWdQ@d%PJM8B5d!K+ERla`X>QS`Bha_r^qpWAjJtx$2m7q79*GLI_`p8ynnkKr~m14 zWU=~?5rYZwd)SQqKusAl(W4>$uWJ%eIK|!~7#s$A^h9xfC? 
zF^=7}Paj@A_U4Xbvy`1iLw5(RJ~k~v1tY?&A)YcL^?90o)5QCs!pj=jJ`h)Cw&qmjV%Qpg60icVJN zoh5h~c$Mke)Srb|=_-Z`KYnw2v-7&P!EPQ4^{UA_Rle5bOX%xI?Z^&rS)TuACIbj@ zD6WqDXp4~7h0*pe^JnIcM-JS^ckTQ1uoJK0i<47WcNe6)5gjii8&T z;ts_r8r)LcT?4@(SRlYn-?PsB-m}hKcg?T-%Ua3I?Ad!iGbz>TO{jfhu?1A$iL)?$ z-d944`Hd$PB(e7i#FL$HWo7}}Ll@*~)%sAD+;K1-S5PBmSIA*K>1oxERiA^UsS(qT z>7@H?Y2V4?l~#<@CcY4aVbxrVF;SIf!A#m-_^(euQ;UIqOejwGWrv&}vUKeiPf7a1 zVDeo{ZBF`hl61e#UTkI^E&Y}h0_E<}2N2@P4Q<-jTBiT8jZyBk7W-RnJJ|kf@ax;jo7=8>$E6nc3Fu## zpykM8(Fo5+!o6GTq~W#Xq;uZa@gtIxNMgooaH3XLlq}5 z3qU@5o)q_6h$Oowj9UIu|K-)0MZ||LD{v@!3{HYYXze)V7j7H11z~?lh8+Kqy#f{$ zl49pWeNfiLze&KSmPWAAN0XZ!hx`E%e zi)l3TX%LJ{n8E@%-2yB%H($Ryg!y18^MJR|sK4qxUr_`o5uqWz-H$H13GNLNxG_+x z6Ja)v*05~LQ`6GCK7!@rO6tOXgyKy3uPtYE+?;lx8V+%R3b9uAwQD41{;1IUi)e3I z%vw7cIpOB*!|HGnRX@2h65`9r7s-(2Bk+OQ3`!6kOt38VVFTgjPA@c!Qa+8*BgHy}diDC21$i%+e3jj4NKTOV?R&fMNjHE- z!9jQ_F>h%*Dj4(f+Y!!5NVPcb4EbvlQ|xQBo*}+Sxa@GD4o=gtsI4x6@)8h2X{u%- zP$$g2L-q?JM&-A1bKKFM7S^iKAHVrLgS?CR#$=8T2^E4+nMv@2_XkEhOQUBT$ho_U zM}*|Vi>u_jVgP(ZV?*b`t{BzU_3juTM|@!9Vl#+qA{y0kFlW@V!qLOgt)tPgQe}-2 zc`q|n@sW^DpHD4EUriQv1)A(5rv-**ZSc$kBE&|g64Yx!HBI80*`wm-tDp1er~yn2 z=zbI&@3lq(TXrhfXj4;2(aSGCV(>ui8BT^cU2au;gAlm^i!V~Sw9`i%LWU#zq&$_g z;@7Ld$lkYX8s_gEIw-xnpGk7P+f-O_T?+8vfOGp(mwAj6SY+n#u1ckMz*`()Co(7J z?lVN9h2;-t)!FqJN}`>K+_k2!;( zPogco`y;@o2sabk5YaZ!OU>5zt-QqQo%e!#4mG1FbD4R^uePxkFOu*{>=-tllvhrx z^nYl@h%Ua?P6R803h!}%syM{5Kd+8_Nt>=gRwLp+c^AHFv~1=bO!aFR>Dp8G*AjwmRy?=9#F8U{ zGtz5lSU<2@b9J|&{cPd44F5qKl23;(;wpDFGQhh@e^53s&hmSLzlw?=ogKJ~@uxTw z8#sCcfPft$u}cShtfwH)s}7?+J?`b^!$PlR;*SHo0wqMGbBIajtg}fpda0vG(14aQ zEg|n~{i&?x2R2oDLCTjKxVfW*1vY&4!3Z-Bu-|xZ4myM{-qIo;04FHW7orOcIABN< z#nn!rnBN*8LWGADQ|_I{ud|{-^s3S}GtU+`9Pob`Z_>XFe;2U+>WdV%MX#qb?x>q1 zo#{T4Id;?eR7>%cQsP_}#+^~9x)|;2(EEGlf=G@LVa+f*?awYEv$GaizoGh=4|n>8 z=qJ-jCNopm)_-dCon=1DmVoAU&OeE4Pj-1)Y5#1^Y}F!?4@svN5hQ-i4*Tg{w&b15 zQyXyg3|J0q-Ksn0@qGbC)+RK+`#t=hMiqcj4y7^=a9kdkj8+K#@8|>j1x3acFc>D( z_fp1WXJuhw_6yQ^Uah_Ow7w)DwdFf-#FgnV21q{d(a$A_{|;JI3PzlGFOVBSmM#vz z)B6kg<5`t|+D|yD;DO@&o^);)=geQ4c(QthpDI%G>l%vt&Mwz61`(B?VCzF`LNGL5 zrX$52WnfgaZms4o!9JaJ*Xq4u?$bYdc9Cb_SWG3m^%He!;_7AX*7y4RPt_a9$#?E> zHQO&!8f(eNttYF17@;7-Pt4VzNXiv|z^Z2AxQ^6?E3(<7lPUzhI$*%2Pt9xjhs}@M z5pl*MW2R{c8_;zDihU!Y)nXnq7L5gw6s$;k7FnAH__w&<721CM$Z^R0xL|MQt7HCf zhI*sB;<@saT=tm*|0kpD*#kD%X1>Fdr88q87seXKSo$+{=JrfxaG(ZzHG$%@oA`(~ zlOI~b(Z)jp>ak(09l~S-YtAC5~;v(@kS8J4{GQrTXfQ}m}slE|iC9r`jI z`6KB{q+4wdZu)HRlFKLiV~z0mf=v^nq%p#D8R_Y};Su0Aj}6z&O#(D%b=~C858wXP zJ&kTE)HLpb0(FIvzBn>>+3xFAMi!oa^xs(5!1n}Z+|)044~#G>J}v@aGXt@l@6W$t zs(da+d+{kZ)GFWk)FH z#^-;#13NoBwQeOS;W$x}7b#|l&pz=t@OK$l(z4I$IWbFaFyYKKL_u0L;5H$C=OxO^ zjK;expOpjD%>C^J3K6Nt*tMeCCiCzM*^AXC1{`SJX&P3$54Med=iD!;?h;6kmpb~C zxD+<>RRlUIiRoy%6xy()ZAmC=Yqu2^B!L-#fTg)_&%cro5f3jHvE=xXkLAoj!>G{% z!De7!RJCGZF-9HuI{?7Y4_Znzx^(ZU~15hwOjKWNx*=%%aN2$P~N)L%49_~@&e$! zsX(x$>A?*EYQ+}Z6nVEL^0=>!$zEIxofK$m)zr!j6jwc$xoD{63OL4|6P?mDPUzhG z&I6Nlnop|g{Gzv5Yhi_bB5ItG+W@MfTY&c7= z(Bq(g(%6$0oX>ZE>-HoJp&@N0Fn2JK>9ba$9#7`xX~?k}ay`sH^V{9Q2#h z(lTl(dKnT+zFjRnlH^mLTfK?dvqc!t;8U1Cyo8yen5^!n|Cp@fqelpX?c=~Iekw7! 
zBM}a1&LS>-b0x^Ce+dsYmp(^BfGv3RqnRhhTR&(lcLqRCe_5lqFnzF2P zxJMYRp#_oxQnbL$@r@0lUNy3Cf0*viio}MQB(nxvUW9#QmFBd>z9w0`<;`ih&5u-h7lh$yS2GQ`` zgV*{dpQF_r^jDstVQ6B20K}d z-z+r7!5tZc3<%4};NETlk%042bO?_8uvRV@Ul%p}aQR`3YG0yimSPxp9|it@M+HAt zzz6e(l_sIy50M7JHVsK!J{XteM+Tc|LZyL}S;2DmO>aW%44i)q?5w?bjM~KrE8=1E zz|vyJlOslVCn2kSsSvMIF|g`4vNY)_ix=&EU<$xfw4PWh?sN2)@K1j5*B=65@jJ(O zcd5#&okU$n7!_zn5_vppxytSfJ$PPIQ`|lHOt;j@#76xzqNMx_pTnicB=juz;<~Dp z=^pIHr=@qiniMhT`0Z}mcC6rmNODr?aeXa2ar%GZxq!21mj1SMj)czrt#_e+w%X4h zJ}D}&DY>k*rDVQsPZBzll#RT8lPsQW{Bw8Iq1H*aYPOiuSC4u97#ZX_fnnM zq!P8~sx0l&Hv#zLq7!QUAE~RRv{ig&e7YSjzPUajr+JBqkC8P_KI?Q`mja^y+MTiC zPnyaOmG&4~9opz})z1D=IXd2(?5e{=$Xn&)A88pJwW5zpn}oe-Bnf z=?2~a=FIP-Mp(LPctdOkE$Xe}4qCHhJ_`~TwyTg7TnFytUi_$*PG%HWSZ1}H-xa$` zW<`zRPx*7?5u^p1icfNs&kcltc5HOW;cBGat!U?sn(JY2&f4u?Gu1V(n_O~|YS{he zZ~*KGHNxwpS6s0)t;Kl81B$p@Xg6xHH{km9o2&-kNf@7o3eLwMdW%_BpOF8Q&@}J< zVnW&hv~>+aGwmc#v&?%UmEcoUTwIfwqpkvg7PG~uiJ&enM&d-$5XNC~$cAy<1%&IP z-Fx4xAt_jJf)Ng6X083sVz=f34{gj!mS61v7c}l;>wefKaaXx#dp>!-}S(rYSn8Qix4_*frOQE!MwukJy1pFa(M~vWX7xfa;Ba;ZKv>nTqw!J=H(T8YWW!L$$Ux*4$J2sfVr{dtw-v5Yx$0a?3b08>HafhXD z>umGgidDl>RPGH?Clml}NS@1(&w6P*ctJit=FgCK(o+3cW#3c;^ximjUtpEgk=?ME z@yBV7GW9c`QnKA8Y46}IvNmytOK*S6N@=@Mg01qAS!G*4>*PTnG)Tb_BlGtAs7@67 zI1h!5;B1PzjNb3aD7rPzpIDxDbIwGn zA5uT>N6ucHb>6;OZ1Fhw`B~=nvTex1TlrwIPUofSZkVle-{;25ENEfZo`awa z@@7len_Rt~-4#md`V6?(mxkvV`4atnM_CpatVe%*0HKIa7sL$ z^L2grq0HJW>42k?O92H(a@ZCK_}A2$Q5?i%Py2wZhtc9A88@`%`!#$T@O)CIqZ&BJ z(NY$x->|?2dRc%$U`(p&zu-ubReP|VM zgE1{e4Tg$08Tp9>{KBACYlI(0xvw>QBUu0~zZLXd;Q%q?Yhc^s#6=Ab31D<9BI11K z?ksns$Na9xGW*DTIGsf#L8xs}|D20^XG|xX3}N4SWe;0>Xf=a=FDyl|B`^P+tahq) z!p$CaPrBgd)hI6cgza$wxJ-{^hes*NHAvYq4{_>SVs*-uyo-N{1tLqdo@imfRul>H zzEK0fPe)U04$RS*{OlIfW)gazOVWlqV9i@BYZ3`9*kHN@$XQUMEi|9?f15LkO2~Qw z@UK(T2mQ{Z`ZYF-K!;oG0GnlnN3N;CkvKurnYDanPWXgb{1fWnkK&e!sDAX=K#1aCRA(Y?eKS1OaR=1RlVc)Y^4X# zR3%&6G_#D*o%VChT4;~{H3{87`c;H$-qi^I>Mr%S z-hb=$?WlcY!Q;+}amrVRHOhYH1ur57;udBP@%s$VB9q69xqVBYStZdm#khIzq3BvJ z-@zlJCu1eYv+(k**kRSI#Ia0Oxf?GX&%X9o&b#;InX+`K5vAV^`&1ftu;Pt(Guyo4e#ej_XhFs_$k1Z6f*L>2s9zyU1s?ay3KF{{ThK|D(4Nf~6?o78;y#G9sD~ z&*c|FYNdF%KqeA(N@61Ou?qIBaUm6E!}yxHH|e3m61 zMrpj%GilLAABouSUlrVSohop?#SV!XkZkEJHfG1VdAsMbN^;80q)%sm+uob`c%yhX z8QJ-u@3VL6kX|h#W?)l@jm17ZuB4Fx>K*zCj1cYEAZo4)Tp7|VMiqgdh5v|14&U+= z^uTl32ad2e;i@ZBT*&loar}ZaBfL`nQc^Y#>9tp0F*G?JT|e8T;Hffr>6{AQq}Mt} zqh+1tQwU=9Nvh+gr)8Uey?m9PW{&>kEb+Ga!?jZ=E;Mvj@tz2g3+PP9#6%8JQNn{R zLmr7hc*{bs_2!Qn8REwSZax!Il5J8oZUj!GNPUU=>ILr9_Z7T#jQmS*5}Q5eJ~+_c z*5f{l0PNNEwOO~ZnUPw`#z^oIn?C3QkqX=T$2m`|0{(Cs51SoPpadnm&d@*i{~^?C zF7p1XpP?xA-52Jo6P}F0lJQju`5prosBO29YE~E_Q(lmsO0;1u7XJt9z!v+9ihb&$@|(lb|%UBg-Cu=Z7fQmafTNm?m}Rsr|i>cM2oY5Fh)ZeQJm;_(?zbm zE#9H4n4H@l%Svt4v+HtROPECH9iubRRiQ-%6bHbDC3xU-NMM)#TAuuJ1OV67hxj^A zjt5uawtef6V& zo=dm$#NQk_(ywM0j-rMCj>^u!VZBd@PHK0cB z&G+m2cce7#Q>gfHly4`WB(-z~|4x<-80FqyTHHevr3LkDsSO{!*AfpYyt%(#3q(;F zxt;zT?FUCqg=pC(L;;u!l_G3x+Bp(K>KJ?4(t{p<3G6R3w`0BF6@RH-mb>RCvV~}6 zyqpXcstL;l+1tQsz)lX$yEB!OR7B6Lt&7EXUGEBrpR#CP=1-U6?1+^)&G>A!N#S-x zuEO_vExH34tt`8J*dReF6A?0muT{W>ynm3B?niFXDCD^Qe}ZjCyR7NYzd$#b!oj;_N-r*x*Ov7X4dLamd9 z9GR=rhl{JT;EQM0ZTjQ4Uc6%$y;ptC)XN@Ld7%Exl)*S_8=De zktOs?dTVX=z+Cm>!k$%C>jtWRVC$1QIPi~QA^fb z`dmQ=t5I{go4iW!qyj%boyo6OmnS{sjaldD^sfR#M|ytz=yFL|c{P^*G{7YCV17qC z=n()Hn(`7@55R>q2^%Y(urxQjmLydziDa4v93@|PX8(~i`R&iX?|ShP5DSHoX1}*; z;FUlfg&{U}v28gVcvPGA*wjL0&FdeDPCi}msdjx(M*RztXwP6Xon93X&o7)<+y+d~ zpID+KJRdyzJ+e;)N8q_!ckBVmWh0u_@1+$@_ot@E-~u<8U?ic~5QIAcdwO9bNf2Xk z<)tPBafc!n3&6;=)kis>5n$&6F?qB*Zce9#-ngn6H>kiTY}flxx>DyX?{f 
zwxbuwQV_$t`*%s6ftN>+lQ`(axG1lsi#*u-j2T8hkZ726&ON!?SdDRcVAe_>c{_}sa2Bjj)U*!JZbJjy#n`Xc-{;}?_l%hLU_-E(RJll2VYnH9D zV8r1u=W?w;zL3#UXa|?_hhJZD+xUB=>auD#(y|s*Gb6=pVH}hv&ST8K>(?_IvFdc)3D_BnpNJ`_JS^7Ayu5C^1ivufr_EAV3EJRVk|o#@BRtvx$AhA_PLGAoz<`w$SEuoo*0 zD>PPojPnuBDnJby$-V-$prOHN*BuB9b?oYk84qNmZZXFr040&w4XL7WjrSufZHT{3 zmI9CMHJvxRi2~4ab%G=OFBUVPBuD)cf@yngdHnk?q}WH;w`5Cev)bkRD(~Noy*d5h zU&lxm`TG=D@HgbqqpbaTe@7(jkv<4H{hSiMXEhxerhvIJar-0KeX8mb7U|T+A-y{s zJ-OE+Fr4SOZ+F2F?P{>sg$0{n_J)BQ)uxVf5P|#q|1T08jTJ5MJs!%Yv0s@}sM;5I zTiaF2x&pLF@^pdL-Vzx}2u!fbc?-!#SuZ+hiO(G6`Yj1Dc@b-`qHp11fK|IAER!{P^R?#7hwg+5v2V zf@vZ@g7uZ{JA=-HTypoEPMH@oq(gXYyhV`g=^wR~I{a}v*BTuKjMF#<3|7k|IUcd! zFrm^t5`ft% zH*&3XoFZ3e}5d@vxzw z>J4&qxCZx525!v;%_~vc(Py7rfA14I4mhAM2rpKyzX<%(?l}xlc^9OqnV; z{4#Bkkk95F;<9PCsq(bHq<9Y+QmNue_)WW>GTMuggd2n};xkwq1vjOW5kJ0rJ^JDC zL+U}J0kdkIju(+j_K{8Mffy~FwCuUd$L-8R5>89iJ_%9gTP(8=?;6uzo4(%vaFBCv zlRZR_lXCV?=zYDcbCLcFHABtyI-<(NVsk9lWVEM0?BQwYwZjKPwyI1GyOb7utWh3( zKj>w(L=$a2VWsy2ofh#jkK+5n!Maz$dhNGKuq#O?l6HCaO;Kx;!WqfguMkEnzk&OG zAvAK2CC4^|H~2IAPGt8>>@5P8bDMS_{Dj+YfElY3Rb&YUvk|ug&UpJ?ExS?jFT{8} zlV@C^i7@*cAGj?#ydOM^Ki>hfGm0N|MAPzUD>_{_v}HeRm7s{F#*~g-tL&PM;tcXW z4w2gf95KA2GMO4{YiwU_qK!@seGm#N*)Q@ac3vo}uU?QzHa$a{!0vK9KP@)fD(<;M z|CnvaOnfEVY!E86cji8tNGq45oo^v9fAG|F_;5(Ig4C&9b_oR|rP=+fsIoEmwv_SY zBn5hJwpSHT0s>NgC+dxT{kdyl8>_YT`qFrxhM5iT)O4?3#y#p%C?kXRC=DGxxZ&yF z=+=Ividx@{4n28*tiWWEcV-#~xbjkh!>{7KuCb>gF6pLjJ?Ksi1r#1}dhAw3ZaPY~ zF8qJW)KC&eN_5&b{ucEc@apgvEwJu)oWCk$!YdDRB|XuKpH#^tvgH;VGd;us zZkRaalO!FV*T&rF@_kZjEfEr8TneueUi`SS%)ZLUzD7u@{3rh{#V0dASRbsPN``&$ z?f+MA1NkG5^dW16VltVA{Ac}4LMi6MXkyGr#63Uv>)w~po{h*nw0vjc>XG#X!Yq;; zTgC(Jm5Z0O`pX1wG&H&(7QN{2`9DAZso%m69c?4j|4cH~r(J2u|F-niw`cOGceau(Z(oG7#gh6#? zLUI}rF*XTW0@E}F=XJ*y?<_I1(vMWRH!msk3Xzz16}v^a2Lf5C55$jQ1AQOaKAZJS z)&IKNS0Na6fJ1G?)y%o?BaQK>^}{>HS`4ygz{?lDCMN8l z>aZj8n@l%evE_8(nbr^MpKB^wL^_E+6ufr6UDzC!+0*_<7N-A0e=tdx&-;Aft+d=N zg#H^8doj>zdTY$#^6<~tw>#EuswVB<07kZQnVUwl=n|G#HJ|*F4mFKIFu2)^uaRZq ze4O>^(13V@=FC6xGL^GDd(MF~4V*q}I)E1&>vle&}O^ey$CkY;h`X$8B01S2y|XKBXX` zGs8F1Q0yUUL>*3q2gyGf+T}K45}HyB(&5+PLc(m@=GcA+nq%KDH=)er49Ls7CQE$o?+j?W8lo2W=O)rH%t zR!-EZIv#3O^xa5$8BRIqlId6Tm`gd3$4aURffp@uCWO%_xaIq|&hE*}>M4%o6I3e6 zde03^*#|xd+ro<~ZSQY2g5W*24i1Fc0W1fuYy+g(!FW6&i~b#;dL_MU|B$uZwJ_zg zHboMk6UR<3nshk+sA$@Jyhmu-=d|x1^Z!nK4X866x9SM2xDn|0yXQ?8V(UMB@Hi!< zMMhswv{7jK)+Pm*t7&4N2B=Ct2#4Xblr&m!fg^Cm#&Q#ELl>Y9I84%n#*vYjGAsSIv!Tyb+|UN#MI0u|gKv0a znwD7mYpeFKvMJtR=GOa>X!3Gs^$ot(U_;}m={+n19Hmk4i**l}2c<#)h9Sm&&p(re zNcb^*wtzx&Fq$RY-Ct-hX7slEi!xwlK91){PDz7+YB)uyNmz14n1wTKup6B2#@3uk zI@H}2)E~4YA@HAMOg-}%CA&_3YycVnpaOiNjX#)tKdTduQz&5VqCs6`vlo-S4-gC0Poy{Q3{b251;Q>lc0*_rQ@Z=gqY~qus}4tdxscV`v;ml{%FgW^vAFBg5vyJ|D2x zFR>FBu^AlKmgTL-LA$OWEVPLJS#{XC%h^&(RX|}(80|f*bMn&!GUa<){`5C$FPi;g z4PX1aWE){;sk4P~y_^tYiU}&pzs*SHT^<*jzH=*>u;Wap%^4T}$>Nrz2w_o*X}hrh z^(9rYwQ&neC9sE|2vlR@E=OA*j-$latLA{iApYj8x${LVc9U6lmEY0h_VWKSYCPkA zM2rOnz@psFUSA$kZ<&Z*8RpE9W7#dQ6f5j~Ova;|^1{7Z5kVNIZDXJxp~#9yRjdCT zhf$Zk9|mesnYy?xwjdO^cjgK`^oK!l)oMLq&l$}HqqDiXv)XgN3k!Ddm9R$SS0zNe zGI{Z|bfj21$lE2wLA+Y?9V_sWy=_F0=Q89^mT`8IVCR$8D7e&PB7V0`&P6H=NEf`)W9xU68a`uiF4c+`YxN}!7Wqtt&dP0 z3@!w(&@Z`#SS%1lDwb-eAmf(e6q`t!z5MN|B422V!NGT1O6{11sDME9AU*Rg%MNh` zlOB}a2wiVVAk^V}-eEf{q09a){y+{`_@RbHrH0=_dE0nVE#{U)_6xs13_Rg(^CFNU zEE!$94B(R{rD9`>GAX*UgC0>rofyx4B|2m-QD5>v_8z!^I%+|oPLpE!Y%R&<&-$=w zH3QmY4_~4l8Do|$twIBGIP{bZ5$&|hVBEx+PtFUo)_Ht6dv-e3Au@YrIA|s8?0{gq z(=>gO=qzMl%UI1sCd9}*vvA5>|7=`heI&#I8(J8Ey|)7q0gDVSN!7jV)BZB?hc>k- zFx$xe#diWWV=D4{a@5CjpSuAC|FZ%4u~!Tt16RF)EcZ8&o%$)Y6$dD`g3DWN!#?O= z8exe3;H8NE=e*F%qe6YgD@&0eKshi*DY5YR0~9u~pOE-BYH87_o#Sh|nr?YV!bG^A6yz 
z)~^`A_miU;&H;AdwqF+dH|QMrPPKFZ>maW~{Rhje2#Fu57DKAd@Y#G*1exPg!^9Ae#-h}HF+4Ht@+*Zn` zD^peVX#c%A-+%kkTv4S!AcNJ;Mxgn=)_uob6AgmGePV_q|q=W{MyO`;=+|VqkCqA8-jvIPb9@um9Uj8ece5uL7 zRDGh%-GTRBb{hRw!OMW#wf!#WB+0cvin|+sBoN8o=?__NyVbhLr9Z&X{VoP?EJcUy z*jjKXAvBf-%*q3UKJg#5AvfJu+^TE3SSv~Y{Y2?ue5fgE10N+=kz8K|?D5#O@_pfbZ198&@MGJ@H2bxqS8vuwssMOTsUdRlIcFQp_KO)*XNhw-WB4n z22`$dqweHTw}tkl6^%Z8n4k49h%D3?B*ZZ=tZJ?FB(2;s(p8xPwAHdw1JAb8+%e_W zg4stg1`p&597~9oC&V+1oLRb9c4AMhA9rsJ`6q<5D4*DBl15-oOcWYLfl7ZBIgECzmbIs`M)G(8?@sYvf)x2(eS-!Wr1` z&x%ogPYk{KU*^B0JtdU6He|nW^q&Q=PU)Yf$nz$}XY@Ii>3vxauHIGLVmb3;x0fLy zSH$$g?`b*TLuKe8Z41GTFzA7qZCNM|B(`X(fc9Cp>DR-QpgXH$*}$=NEnoD=lU_PT zwxWi!9W1E0K`Eu>mOD2T3;zxa;_>MSGi;BGcCBp81CjeT>iA9FvL4YuX78`^)5fs( zYc(ZZ?2{}NP}^2;+xNVV)55H~^`!2&vbtE`SiSR$imZGXyV(~HpR0z}NnAh)Be^7X zY%UA7?Re2DWST*yzI_Ca- z>tHDwIVc$=eBzZy?Y_lXX0XambanG{lj}C?SCYeH!~ftzvFi2z*w6W79!~Ci06jP& zj_O1e^Peh-sEg5o#!{ApBi~yYf;htIsC}&MXqj!}j{*Q}%U9;Cv^856Z5uK}_neg1 zTpQzjKZU3zo3lyuu2{AJ$ISv=3LWK7rOlHBUZt~yCdwwTk)V&VXlnpAxd@3+g4V$u z_TA+A9%(mQH)S_$^)8 znmwwp7Y^CUepv#dLwX>IDo=0eJ%^SkZubr5@w10ZYbB^m3i{UdhNj%eWTVhhDbHJ2 zd!+r1<~|8Em&{EK)+jdD$>Q^CWqO~B>|!0d*?=PIN@AH$9Ik%QNPRAOIDXUpD0{FRxdbS=aygM^SpaYSU-`VoJ$|wwoOG^K5~}6^Ln3* zrK8Qq1T2NR8d;2TSCkq+e6yH71K|CsDi#S_R@PZq#gxM&Ca>5ALQE|g`_8yql4-!_ zxQ_c+Y)Mq?YMH=-qx+yzP4|*uHF-qKv*1hY`Zk*_?c1~zQ<#f*Z|c zAX+15T4>fbJBm(MDCm5X{IZLDRtp!>dEE{J-scCV&c=cY9gUibD!C13otM5agOSQo zVjtBI0alPHN`b@x1>8tUU%6A6grA9_XZN_SrjXfupvbhT*3bNfWRl13Qyja zo+9EclSCTTNAri_X)?Rr|BU+pv&6rjdf?J32R6-q}oy?3;`%Tj|`lb32M1k46yFprAf8>U(?oWCs z0J#mb^X79Q885`Tr~iCX$A1#qT$_`b*{3vuTi4+(JWVLdW3qfO2Hqw)md(793Bb&4 zOppogL#hC!e|ESgMqwEGop;%A(et1~pK7A`a?xaJxNFn-KbGOY81s{WDI-Cq6GH7^ zggyl_rfJ*Ztv+oiFOIt-mdeeD6z?E~M9UJuUAg-cjhgQ*hu;Y``S-Y@ zm#)oUK55|+dzqV^b{cFxXTIr1oBx+tzK%R&0L?>d&X-s$-}50pZ;^vNHQ-R!FL7gd`&GDpE5qFnmiu{je12dU zr))Snm-;`G>?x6$;tZAf10mVD6n7DdP1w-jGvTEW1N}qK@V(E~xm6vvyT4OAw;H7a zs+70TO(J0L*3vzy+{GdmW2&W(*QKzRy$^GEw&3%vj04sL?>#8d&-P4IeYSM^H z$t;wB)}u%*5`K_V;RpFw8ZY0QKYZf5TV;KEvQWo{mNH}Z?GJK?EBRq&G8c<#ncp&h>PKKv!3fDIlKXKI zY~I@Y5Q3>fDw8DMj=i7t7VwCdL1--0q6z8&3C2hNdlvvMj{xr1Zo!z8&0Mk^M>%gE zsOwG;+e_K`Gw3$<&2s-zpB9kF39*XxdKJ%#%}p3>2d=h3oB8dFA30U0K?m1)hL4VR zw%0og`8M(*Z$e3HYfzrEQt=xae+~A&*Fu!viOap&aOa5?zvC~TwCvZq-)oG;p%!8P z->t@Z3}-Eq(f0&cFuSKuJ714x9`OA0L--{}w<^y(xLU$t|q95Na4qSQ1p7eZSAn@Ai6;11!p=a)wayskjlUn~XnGXoW6>R}N z`+($LpP3lalnA;+&bL@B*RQkT0Tu4~VZ7#x^y;?&*Bui2tc|VdH-Ww%1^u__a3-a3 z;tG0tvq+$$IOfWvL`?#0vLUEfCOX_mRM`%uAfQ7Pai+fXg=ToN5gC&7lvMrBH8$Bq z_)>r+!%Cnh!&bFLI`k1D@YaUdCEzQu{1bfl7&2@E*k(4P?ay!*V9?o08jAy?CE%-A z5aM`RAP7-9t-|S-Lo=Tss4f z7vIW&;Uq#oJph)eQp&4hJ@CmQ*gAAQqxmB`=fpT>WM1(_`f@oqlgrwmjp1_ODc*l< zz^~taKqc0je~HC`7}MWDd1$Q)q)zZrqLjDih*~58)+pD%Lf6)^Vqa7z4VK!vp+3Qo zktO2?Zn*_mLjGbcoWDF^6SS$20mBjcfa^U1C>Bb%i@mF8xPF5M*A($0xJ{Wtg9@jU zrq1yn457oX>SBqlX|+2TZf)G@o(w-H)B0iP<@jE-fC~LmVMBwzYGm$8v=vrcnGZ|i zTh8i$BShD`l0x{wHd<)ITJ%I1LXfE31|Jt9F+#mTf4P*fFP!dM=<&4eL?%A(TcB&% z=&5`y2v$!UVKg}UImWV;$w0YKIr|c>$lb9WtL$Gm&B+6~6i5p@wfN$9a_4s5h`zHb zi&c+QPmfn1wdxkj!Bk7l#7n_m^bR5ZS0xeOd`X@I898cMTl6)u1o=VP@hYpeCIpaO zRyuxL+nlx7?`xR+T18$O)7*=1FVpvVHe{;|LS%8hQVvzM?@ocduC*k>ExGeGposmLMAb zXQFYQJ3_rt{s92Yb!9niK%yg-)SH6qZE9JS@_GwkzjwOJ5d?0PYd02UVzS0P7ydak z{H@h7sFazSF9A&=$kcE|g^)2^IoOx`cqvghGzp36ZM{neo9~qpD%7C`(wSfB1>$B1 zO-=tDI03(s{Cl2j|Eyl4`v%xwd2fA9uWB|g`-50eRgOthPuY($1Q0;Ks&Bl2!?TZi zy)>67*1-!#x*Ap_Rf5{#jj6cPmg_s~*&Ms=O&sv&6b%5S)|h@=3b-+dwW?o zhfc;ySH$Nu9{ra2!g-D#H}}hr(nUJ=qI?iiCjmAPdSam`1$Qq-|dKPkl}5z_2cQK#eIh64{_a zR9+vgdhno;Fgy#(FbBNR+Ufi`N{-J|%{6B)E)vo0Pd^r#%OtDS(%GDWN`Cd&+IB+@ zQZVwUNy!awe9RuS+^^E?R9Ugj*H$jni# 
z8GI>~3t!w5M4EEz?W#KkH2N!8oAajzO!giBT}ReH}UUsxec~H zQs)aizO;?;TIC2Ezgv%^3ymS(FtUABZ=LN@?Gt?Yc`>;v190OcIxU%7&4_x&u{hTr zj#Sf~`1=o0qeCP8BE~BkUe#^Mn|R%BA~K$)T1UN;1x07~v|MXpLOQARu4HdQWvM~O z{K=<1Vb2&@E4qEMM~DtY3Tq~axDO1J)8++xePWv72l~qT5tIDUC-1oATrxPtfQzVI z&*BLuF^#8EQ}u88dOL{O+-$oTl~X#Ny?8lfuLo@?0+nwKkF&H?%hX`++KW8bnzX5% z%I?SGj7XadJBSrsBf)jn*nI2r*QSPWeAYzIB)ckb!cAOn^3A{J(Z#BW{~2cfb(klV zaQ_~~(!iD5=_V%zuz2zb|1cXcS(%`b`^_u6dKbsBMX_P@b2vFJ<0$^CJzYQ=4#$al z4F_(nSbU9gnHA@VdFWIRo{9g;-UvT4DHFuJ>A$Sc}cVn-_Rj?5)jZyTSLb#s$fVx)S4)sw0Ha5(sG z{&oP`)^DX~ww*QYb-sBJ@)8iIxF6T?tc|EqB#mrkhH8O!8$w>v^HIiq;nmdTRd~}hJr&*HnAb1%xl%UZ-ttby=}!^oRX^on*%Sg!F!GO|=bI%>B{US;(F8ylb|r7(TpL1UmojgnSECY;LKl@KEIHifTC#5RhL{-E-3kI(OT1*r(VnznUJfbVOqt9n!M0M2o~8 z*A<0Diz+YGtBDX50h=GXFEGabNI0%^upD$?En)2xvyTVEO|JUoHbr<3TdC9WUikbf zXQa>iDdBdx7U9CM?CRZewbnkAo>G7Ub_zf1(}Pgp%SfttBWh~5DWG>R*_7MeqVv?@ z!&ugsXP2nuon0N!#%Le3y*R|i zlFx#Aw{&T{8^>U~d!;tn_;;zM2ne#jJtZbPgO)z8n4K}6Bv!kUCLm#`m&7>T7n0UA ztyNmXJbL^5vwxZWs-kIo#XwAEVjfnzgpx)BGS?Jclaz!^WadT98d4>g2bvp@PfyE+w8f1$%r|IMx@o91sUs;cOi5mXF^nw z_VxlN4jkc6e=wUY+B>eL%~q26#l*Yg8(d5ylGAN<)8tEQ$%W zSPbA~YkpW&On~Y7qW*Xw^#bwc&K$C>GDsIP@P00!5&(}c)5J=x=5+N#EhE{g*XJ@a zyT36xrA0hhm0ZUm)^CtW?^hh)P6(#JUkW-OUCf*_k&aOsm%unb7xAViPl(}^RpkJ2 z1|xbnYu3!Wb;!*F0cZ_5SbMTzss=W{r6<0O{CrwVoXqhU(lp}-NmX}D2JCM9z;9yH zYsqsvYG{;B*?&P|m{FC5M*0Z-7Tl1HTqqYrIXwGh>DsI1t}aI- z@a_@^Baup-vs8P%RL7ix>yHW5iMpPtx027QTTVj6?;<06RAaP1ipvJxd;cvrO>=6! zH+gT3wFM}fcWYz#02f#v-EzZ$p$+?vc6Hi&V7$*3TL*vz zp2`VtFm(${NGa$QoR=eWlguqL^N4c){E~L||Ien0hiNN_iac`fbAS!Nxx6LSY|lle zJU9GR4JVxA-SiP8*Jw7pku#wD$WQ6g=rVU-m7dT%`~2=VQuNRQsBD#?jDvD=IBUxK zv6$a}qOxih2{NT)5!v70{N5T`69M+4y8mMR|M2ydL2-3mvxB=k!JXjl8X>p^*8~W` zT?Qu*2=4B|0>Lc=hGB4bcXxLgpR-T*TD!X!u~8gEdicu@ewq%Q}&i61zA102`yRuhG@?cQ#+<0nC`o zVz+~yiD?dlAMgCFn#^~S_S!kbVDgiV@47QL`o%jsBJ&FQn>^%aw*ZefPH5x9_8?wz z)_1n5{K3Z3j}=Dkx(Nr{X~E-T+3$>P`XoZYS5t{v$5J2meQ01l@){k(xf;Xxe!4*H6O@#%%OU zuCH=WI7oYlIT{3)Z3Cvy(e)ia23}wvXBr)V-q`hz4Y8@#DcL3=yZmtpn}z4ZZAC4_LS8-xZkq8N)uiuPf4(-tdž?8 zU)O5@2YvZ71Rb}CPwxzHMk_|7{MdazpRBnCpdp+s{p7GiEEa;9Pg4BvQu50PV9s|T ze@5SpwNMptP|*zx0WYtKP5&#p2r7ABy(=4Vewr`B1V03+J%h@2h?~(0;h44Q{~w5J&wNo0bSX`B8&uE3KeS z_G}(5CrGCaS&wkCBfYJ`YWH!1e+Ah45}1%ZopDi z^Kw)A$Dfs^V3+vb^<7kE!ay+FyJGNai)$t2_`Z{QLI>>ioM7+C;uE*@8U7&wTxxE3 zMA&NICgu8y-WcO7ul&Vn9#Z&&wCd9cL5z&uQ*F?D-VdxsNhHv$h$h z)%>L~aG3*cs{fr8;Nt3L$gMLC$J97h)e%>K_lFV2%xRo(u3y&^dI0C>F~eg6D4lL! 
zdhp<&>qe6zjc#i$P7y%0*XJ!r*368LCeabkBC)r;rJ9izWBhH$3HA+z@f0;6+?mBvUf&=Wiw#C-%|2Di5qpcLc2Q+DToYPq@gQe%7YT z%cvSQrhGx`4#tD|3X-8`?DyOkKEX*6GwF4H&;7nPS(nX^fjAEq_h-~l5-XpjujKHa zLQh_Q!erk^79Ys=5p5b0#m%;hrphxf&jIEwS`>L_Q zi;TAl`%NkC6<~Gg%e~s>8qi~0-tU=L*TGiBOGauJ?p{tawQs{FORg10DZSA&LeUg$p9cCp3#iW zUNjB=NHt)abpc1&$J_iGLvj0InB4X)rZ*`sCYY_AO0&guev(H*Bqv(Q6xiR>jRr3l zjen8mQyXyRM{WCxjtoM69R7t}(KOuVZny0Q&!?&?bVACvnw zW`p0is)Z-kBa7>k$tUqz*of~PXY(eht1$RMonZFgvVlZ}FM%(K50+);uriNFBijRs z$nD z-_2bbQ&}6~S!4|Ey6N-N;~53;;p5)<0GA>ca13L@ZAK#K>Bo}wKmq2& zYyCkS+@|^tk&S+nKfdgb5CM{+R+lSAtAPgPM6{_LOlE|*M=k$+4EuJPQtFdIz~JwV z_a+1$JzoI43PGrOEV~MGK_n+CBeyqy-jXpt5iUK15$YL19&o9Mtd|V2nj)kI_xjp) zx=892vMXD=B>ll+Ubxqf(@BpyU}tC3CxVtDJkuKfcKwHY`1p zY~7MQNV6hs?SqH#ukeU@Zeq> zA^oT%6V|1GF88u6{Gf^Z(bnGG$|I$^&i~7$FLA#trR2SG)eJakG;B-qeGXCPc2{xD zv_q2a>M4|exMSiBuPcKwk3dtDRd7UMNiE_|J(HUt(>FB~kBg=LQi)e~NKU2Y{I z(#kJCYs|k$xD%CqCfP106ThqXBC*XWK&%7Isa31tiua^5{gRbvM|odg$+BTxfEFkw^uc?p zDH3{=WrTTCXmG(aXQjB=X@?9rF>S2AH7iNEMkB_BdU|RhH}jiS)5Xeh9MhvA8!|b) zHdNPUGLUQb=Y~B?DXjn?tGY%WMByK&zAwtK63lcZ#pfzct@u*FdCxF2Gf!llc>CWU znmqfnP^UDNlDM{-&s5aGyfCns$Rc=1PV=AhOxw9m*huQ<+YPG|vzWqADlcJIDrP~~ zjgYIoh1N&Ly{>>u0T##4*)G1Vunl)l`T9tV3<+`jq;aD`cuRLav1c8Kd7QV;ZJ?L( z_wvJZ7P8=b`<~qh&B!s4l89Y0Fpn1nRtF>B6s%3YYGrRT^jcUF@N22FNx+pgqQ9=+ zK$Rf1G9pe@0PYb}o}9n*2Scn#Zm3Qo?LH^sR@4UNqi?uxeI(rFl5dDu!J^&9UGJ>`|`uHR2y@?z)_Cvm@im)uvKO9l0Ep&n@a&{zPLg5S_c zFLftVXTsf^ITo9huC+Up+%!Z;qr&Oix2Ct+Wm8u7;65S4+*khkBO?vWKM#?MlE_j| z0D~rJ_Fi)yYL4=3|40Csw*C!&PY*cb%N|cS>wO*4VZe#@CHa+JQfj2XjH_Zw^?mGR z)1$w9h~XG|hSPF7v-nmMBi*y-0(suxVlSk96B%(VA0(}kD-y@KrOH2ecC9Dw_r)&C>bO*v8i5RWlED22DX)5<#577U#j6*b|AxO~R~6CCZag zxHz>o8Wk0npT>E$ZYeMcWb{t}AC~G$@N=y)zBSYuWsyR@7%zN@?#q^7^5m*CpHaL> z7Zo1~_k9v4D~adX^A>0=O^`^6>=?QJY9|~Cq+|=H5A<20nT)c|GrAwrZX2u1jVBx zWiugdb7y<5238hBTd(W+d3dS|_?ZJh>p2f*;=Hzc&c}vQuz%+Ah@Etu6$xK?2mHsj zd;$>-8G8_Jm@Z9T6J))37WR;(&31Caq%w0uUgvpll9`4Xy+V2LXKVs}aD>c#QoIM{ zm^snMs~JxgCm((Z%!Rz|&Hh|+F>Ub7(DLBj>LAbPeSIk3>~W%^qU-&0Q)uj6ez4lN zk7z>W234tl(UEbGZ64<=|Y{_)o0+lDE^3--c9Q>f|(v z*)25icR#$r@sog^?0Xboy=o{Ldc}nqL)28@z*F+}k%odXdjmX8v-Qa^EW>*+z>{I@ zIWHE|l*;#0D0k*(F%|PMY1Z1%hYCHV#)g|Trb<}a!_1b*XE6cplV(W6ly>WoVPI!iGB@-|IcM0$4s5_1Ds7DK({>H-XAeRl=$74GML9 zuYX@{*HplY-?B}P@0D;bn-gpCZ4+!0{Y#{n)1%Yw7TRuK%7L_JZrk6tlXi~dXsAS4v2Y(g(n)s zct??3iE;J5qWFfuDF?0}i2)k6Rm`a#0W_eNHgcu=vt{Q1otQw_{*67C1CW%tHtHPjFTeRnRK|g%d0;0sZ43m4^iUv%8%V zzh#KBeXzs2-ap}U;@0|$eJJ%$ z6{=$Hhf1x_QVvKyD~-W4&>M+tNvMuTg1<0U&;-9*7TE}?Ma|UO9HtSKH~bUR!ALd( z?~m*j)D4-$om#Az(OLBapv#wmD;CxhF{=-Zc{b-{A+tAuSlZxM@Hd{^(fY!e0ffH! 
z{`>EvL@LMBf)%kT3&kTm(7JXh6w5;yQW3+o%AqKET2h5uJ}Tr@mMJLQ&l>X|{~w1K zl~1=AsIY--x;;3b0!d+~8HXK%8I7{ATq!0osdt%MW+?EH4g?cQAB_U@CGt+;f{59B znpy$_ijqJ)0qu0znsg@LF4n-t-AC1;aVd%&0xh5RKdtzflUs!Z#ZV#z-iSw7jykajI$gr-X zFvoQNG}HksaQiKJ*EzZZGl+h+9Kn^Idp#TMVk;$n>@fO~h1D>R>NgRm~{xT{3?r5l0fTEl!v>(n& z0`4U}o$}K=3r0(EGvRJY-Pc8_*&g4E)vr@z@*)}{nIZpR$HNF0KOW$tuOL8ZBh8(_ zw#bb2##=Hdq97CZY{tVdpS=$YuLH|c<6G6S)5ckyFk zu}h4+n$sRv!yBt?2#~C91_BbHD9#Y;evEz|L;K1j74gnPVr|(+RS1b2hh~wUKI}CKKYL1pjN2=vqFVV|u`IP;k~*GgkW@3M=O8N%*`0ZokLTQR#W%=^W#; z!-#R5WlxLw20zmKy;3S)MMH!iKioh7UzRM!hTDP=T$ZYO7W>>Dv>;@&8jXq@nXUTy zy{{xt1QPhcXA@OSn3~vU@#n4H5Cr73bv4P#iFdXEWWKmy<%}N>xjOhk)G7&^9=Ba* zvGHE|F3M`4h~v;C(Qnkn_4*W82XSx7hH5+u7%p3Sl_%8(O^^ZQJ>Pj%) z<5$nU0?BB&#rU-E2lvRMHl-Jlf@%)Cto7p~$@j^71ptn_Le1bw#7qZ4xb2b^!4C|7 zvmp=1mWFZQqb;sNJjQaue_`xeuWAX$vMsN&Hd0nVwQ<#g5!<_oXubt}=pE>mP!f=*b znTr;v;bRAsZ zb_&O(l)K)tSU0s+QZAB2F~#f^9pu*;bsPS&E%GCb_UH>!10+QhaKX*lwblS%Ns!*o zD{0UpWKcmQn5NumtM-Mb%a6d+7)}`BLT$dyaFCl(JF(sER0ug5wN*KIb^C!Io}1t& ze6YvrP{6CThGm@Jp5OhEjHW}Lf6Qw0e>Ba0wjLc$w?m1GE3JP-lhL;)FO~(3WC)H7 zI%P?*J`S2BkzT|Xz|bnG=%qVzkDZxzkHPK#W!F3AYCU)@)I0f0w|NUXZp0jmI~-j(O46IG$5OVItONHa{6AmVMK z(2*SgU+IfQe6iM_9asw>VN3W;8c~aV^$SIpw|CGz>7&?W#0)QJN3@P?s$ovN)sd1J z@z7_)(o!$<%H3#w!T@6y3$vm9sv*}M8Fj2vpV5``{qQ2b;SqX1DIVTNR3P#tL>H?z3!$mCZs&4E%tuez2cr< zwMRVt5sl;$7m`Q8=rpoMna#Hdl5JdXP_YN24y$B(t-ji)u5@E7ld zOycau_Fn+5=$;*GSESt%gE4)iX!=}#2!nGcjBWdRZddVOI;LQNb2+V|%VxZg#3ZwU zhNNPpinbi3jGB5S*7)9|m0IZPjoTT<0X+&%QKipQHU*z#Dk_c^$YWnXpm7a&srNP| zm0DDLpU7$e-a%TfPYZUu-EMT6sfwrZS^o`#%B5KJzqS(k%s zio##Y%XlNe6X({g-zhod)hdo*<*+8(gQH^1@|1_QU$hr07J0U0AwgbbeYOXFkf=h3 zkKky8VEQGsuN6Xo-;JA25sB}K$LzQN4I}lx7z>?<>j+i7*q`#A8#YSeZvN^ckFx!V z*rfTHtt~b=)35`@d^g3|y-VI(_-dBk>U<2PJy!Ef?$zj1TwIms2zAsTY5hKbLS%CU zMowxLDr#byxDs^;ImiT=lA^hE*pB$tg#qjRLE=R%^A1zYOjpl01i$wzuZQ=5tUZz? 
z=bpNGJr56ec#(92s`yIb$~YRZb5b8$a?>cI0U_8cUtq6hfHUHVz#shd=cbr!5j$H| zp8OcR{u&bqhpzg8Ko`@0BM(JYxarzsrrac_;iS>?$m3Fcf6e>(R`r_{DL#E;Fho%H z6HHuLaez}ejJPa~eWJLOA5;AHSF~SnH(VNtcxUP*dN_{(i^)(?kSccnjJZ9gh9%;* z$f&@3e1H$a%;hKAk>;tlx+i8gxjT|{t_XQERrI@0)0t$ID`{^En6YLADWYM{~E zT}2LS+7p;}p_;TdH2V7X??z$Pvi7dY6wxm}HAP$-!4lcW!}UwhjwF}ea*f4V-fVW; z6f{)wl6JD!+R8g&YsSmqk80Asc(Le-Q>8HreSeK_{g;l~eIxwpmJBQCt=fEzigeAp z$2vB^aM{W*ENmzOe*xp$?%h46sK!SoX}4kDKxjCXh>bONR3X-kWKV*k224LgE8lcb z(LBDU_07tH3-itwHpBgRa%8uiPuFd%k8Z;(-|F#Jmz;_!+A*1Y0PU-{{p0q~E`}EG zb&(+N_GY!&ovFBf&q~h2uQO10-a{srx%e={x|(5s%*pQ&cfdW-u6;}1@5q}j6c6=q zoBc-?#tNa%>XWyo3KYZ%JkvhM9!4P~Vu1n3+KOc?!`me~NybjfEObZy)5_Ryeq&{% zG+kF3XiN{hjI;SC#N|IJV>apVCnW)QegP+7&fJ?t{mNgiG%{-JQPdu#fHRz->YALr>rG21j_2 z7|MJv1RbZA7MxmrN*?%mx#jnt zv?~e=w`BvB0xSKlo?J&HOn#t&1(b)~9ARgxS6QzQ{+(?NE4E(JD9^`@Bh0brBBZ4)bN9TCn+KUpBBX;=oD!cPRj;5D8reJ_}|h~D{E?M zVx)~&ysRL`pAy@DXSEWdvx~iG7Y-RXiT-oF%nlv&%mKIvSjO6@RkfJ8I--Y8EaFMu zp}`KFsg#w=c9Av>;Lr67Fw}x1H*6&X?f@HbMV2D=hquvZF@Tkc)eqM2{Wmd{s*fFJ zx+5DQ;i(@ujAlb$l^&*-L`^}(ckeQ*lK|6aA*$l9*SW0zQvJ9wM1qU}2Kj)}0LgXk zxnl}VOF>KkwuWpd{bHdTawqs~Qib{|q2_*uj zKSm^{RFV2yZyC8t%^|fbMH*Y0RYkKu;j?i*@7qE%Q&CZ|7(1NdT5{LfD|6>RaI zFL!k+IR7^}M)-vFwAx@fUbJ?iB;wWWaJ6rV%jVgnq%#0ET1L}jpOpA=A~nO3?G95I zqbNwxY^dT5RHc4e0gM6Rpjn|{z@F>%w64CyiZbON;bGsb*PZF|=~M_0P;Ck#S?#8y z8uh250m0oK;S=9c*Z^idswY)-J284~cyhL@qkA;K9e>|9R#a~EWBfE@#UYgvE1m-7 zienI;57GeaMj+k@BzOAVH#%nrG7Bz;rG92|WVL+{P2il4`DHYOvOJj?_b#9c*Xnta zqBr(rfKSWQn3$wu_m(Pb`}&cmkZTLQJT6TqCQJ7(`csyJ;uj^elvcPz%L7uPzt<(3 zPL3F%F2~D)jbvFC)fK4yE4K9LrKGcYrb?kA{@=)8b~dDAuZa^pO+Vftf;V87+iHG) zSChud6D5^`VCX$3g#QPyj@HyQz-j~H0r9&is?fYIpNbK6&yRJko%|BnRGY zFp)z;0)VA;;4K`SU*HXQ>P}B9h=3vdNPLc_kh3m9?B4xkRY0NC zxNt1REKUh1j_e~hHjueC561}+yNfKs$B`kj?>Sf%rou%xaja9ip`X&bH#XJO6>L&T zcW$#ADlXrjH$4fP5Y2XeKO4Qrayyean42S8q%n&FW*4@#wefRvdyX5c!jGf*WI4d{ z5kNb770?s!{DUI`&Bwv6GwWi%W$)oE&rgqa*}M~H1Xng54_Uv^#j-8r_b^cQyV)rm z>8TGTEIzSga492y@pY*4j6V>P4C)Ec9fl?kW_!Rs4h4UI2T1ZHDbT6 zF^!9X-l4DlT^#ytx3`cfzoDBga4ORC4L_hc9|cmGtl+0y9-Jf&0e$W(u1Z$LNJN=c z{T=DmtfO?7N5^?00T+eP>yj+dP23e#3`S#F3BCdYfJ<7*&&G(Wpioe;Mic(8l^z0E z=Kqqa4e_a$stV503`ucpfVTv)4faBW!;^NecvmZ4YR+f)p7j_Y`dibdRR8Z!0N54E z6DOu@I{1qTxhv|2)zIs{S5gwr9jN^=v2^wn9fZ3l)R6K=N_zVvMj_yV!LqrLkxO)V zaX92b{fzuGBX$D3uWyTXtcRpVK6}6AbRK`sub2d%?=JZj3g+M|0xUKzSvh`(8@CRt zoDnD<(NFn@-OZZ7F_iaq&6sABk{M=P4&tw_o0^)GR${AJ`?q^E5b;JwV~J4iU#aY3 zVFjUmV4uhw(9M2ljD>I{!WI*LwdqXOPVN;!eqo&;t`DuxH_1LPRiCaT^DSvenwqg? 
z6UeMQp(s-h`F;JouiZhYzy8-1C?^+M-_US8!fkCcLe;K9S1gu3@m2h*p#*T2X<3Ij zEwf!-urGtL)0sJ?PweZi$4~6fJrvyn1h@jQPoyqPmCE$8WT&IEOU4or{~<3Rgd@C)b1+O3 zjcvQHk~jSUiDr@n(NHM|oZj2C_iNm~OW$}!>^95yr>v&f-uM&8-}ofxjVYOY=GyDz zRB~IucWYfqQQ?9nz^V@Nmb+dMhnh-QeHAV;w*^;{Ict~<q-6O)#k;r+bMQy19rz%Q`95wrFZQtNkub67$m<=b8io%L^?rUKpy5oBkS zqfFsc07v9Z!$o@}KEp;vyRS${tqWsCy6>XW8|*X7D+m&-R>@L0bd^<%3rkSzWx)kr zm5S8%US(FI`6%g%@nqVQu`>p!xlb7dYGU_v1>b@zVV{H{%<@ckem?hY`TOX-A14GI z8ItKm2?cO0N#7*+18l8EZGWYJwNJsnNEg^(DPqI7k0hSpTRt%s#~!(rzF-tns+p@P z@RD|ivn0}wY~)>Sap3bLG`}e5I|y)RyKMbeTXC&O^80t6qQ65a6c8T*o~`q=Aq3Ffi*Ka)Xv*}m!Q_9F<+m&Vx9kZ#8A3uMIKiVkNsKNdyXxZD2QPW)X-uA}Bk z$+IJSgrQpY)kB@-M8k1cJ9s9m-h2PTdCNrYYJ#i8uUs%Ru6ogPBkH=Y*r;?VgwWyw zYx6YA;c;nYKFj~nskmT1xUwwGmrSD z(F|d7Rb@uuNU10aF%7>|lwFviAQP`{XI-zj6~>QJ&|^s)<58>|nYiparM!8ZlAaVa zh@Fw#+}|kLpdic2h4im2+27c6#c8&Y*iw&ZJE8--cUY_sE(~=9s{~LP|5jhik$@}p zo8K#UD&EkAX>c%EI1POee(%=#kaB~0pSTzj zg>}z1pY3D1a_brMUskjz8i-P4j>FFkDWe+;~wztn^SZc%>*aGT?ef_A*AKnvk zfv_5aw)}bT`60^p*?UE@{ddgLfbnz4pM`CLKhfR*tS4AD!=E-8Y2kRuToEgdu((fE zu}lf1=sXIrUyVxIOF0_Gn5wMotoYsE6k4-5Wt)Ckrb1R0fT~* z+j;xe-iC$(v2cctgWu^R#$_pE>lmLYL@n>bk zM&6D5$pfwroZ~&54yi!y)O@9zQ*9l?Al4d*DL!a)~3K%0|KB?l%@h`L{pJ2~}%t+pT zi5x>I$D}biSP0^MFO__bWK)W;>!rl%Ia!rhxw{ZW1QPfoOp{~3S64L6IncnGEbg>* z0k~53ah=dhIQ~fX`vyPv{!~o}UFUJS(-sBcj$1y@aI%uc@JQQ!Lw!(|Rk30NX|TCH zEC2}PaBp>_w8pUUK{o!dK6=s|LR=%q_Cp($a0J%+{O9snm0f$?Z_5kdgD*JHZ{6YL;v&^v>o%cl3vKl4mQa zhqV^^n7hn@9J`%tm+RgqCA+t#enl+QC(vfKRkup-@4inLIWAS>Z7}ut0ldJ_VEx{O z+t8e^0X~4SZ;V0nR5^vCu^J5pF~s_58F9SC4T~NCcVPi{4!^C}?>Wa7?K!_e)k|e) zzGETugc>`tbw(a$t0oPK-{1c8-Ztgd=S()|PH_Ef{lhKdryF!J%pIOY>pP z8M3;%;EB2I?ca-MC|mPI<zd^Jfv=8b5Qc6ni6ku2$R7@|7xdoY;6u zTGf5OK{&6zw2h6uO(;#a+5iC*}(Gh4bvffv$G@YS9;CwPWbP}RFwU10q6bU7y zQwUKN=+n~3MRnvHJ&w7^tk9p9_VB6%@Z0&e zPrIHwJq8?f-2TB1;_w$y!QKyNJ(Q``q%_&w@|Z!X*nxZPw3%&dLHeJ}@wmB0-v;#( z6qYq7;vo=_(h(H}P7cicdP6%DaJ^HO3h#UHxW9|AWL;bB|kp5*!QWW_EAyjM#vB)YYka1=}+6m!@U z^&xOh8g6LsZsvIDN!YmyYO?ZxoFV1^6e(#8Gdw0foNdc}2%LGsgFKY~XL39+FFQGwbZ}3cAqFKaneh6hWC~1<2MD6?#uUcu{4!yb;=5 zme-gQTJ|J>JXrMQ#knO8#l(I#-*^SMSh@WjbEd+uQ#m&RC_jNqT|vmY0%tq7<9pvt zG%S=D%xswBZAe7QmIei!Nq7cPbbC={zIpE_utjYSXWv_wZ$5@xL6hG>*8?GRH3YQq zOBa}`ckQY!ocFWgaSDqkp90R=+fPv7E}>S6`YDY8Z=&}GnuEAe>NVS_Hhj!{#jb#Gu1JHQZ?W2o4rb%sokm(A=KyYiJMVuYHLTzYVez~ z7IPZXHUOI^)w>Dgv$Ub9G+Bm|6JL+AL>2miMJzc;5x3C({C4K_+MeJD&D)-ZL8Tch z{!MWN>;_87D!^oj4hhYgTMG3eh%c})*ZaSBYyLfy89KE zqEDED(R@-*)T2hT?Et#LY+T3HV6H(AG&ig$gOn0H3O9*ZRmq#$^y-rWe!@pe^V}|W z&D)l$Uikci0AN&r{(vl4t@N`5HRKi_!7>jsKtB0hV~zh(`E&d9En(o@8GjhWhhF*m zXx{GT5xUg49aw_U`#u5beWV~OrwT`w+4wahDjlBA;Q8Xy&MST4s-l%Ht(7Ajv$4h)ucdjBlKekq2X@erDkP0 zi9ybwXUZE9xyR|}-ruRq+>HWdJ1IGhT`JzgM0vfpyyJaY|C0sq>SZ&0{AV-#L;1pA zm}LI#R%huubR`5N7{RQ6dwhKCPICPYC_=}PDm9YrwFb+K=c$Lqn#!RoNgcY z&S7c0-U!%eFB}*{_sQ%|V4LX49rxt6S#wpO(l>3%>tKhXllUt$FsV~rKK=v7;)I25 ze40E%kuQUVeGBKPSR*VKwx{6#J;!48&0_=iSlLKs2eO;GIK{jPcs2Fk z%hUMG^-FkjufM86pFTf_kOlxZivXv4*JWEeuAeYrGb0Pw)om@pjNX)PVeG zkq?XSxd9h-Qfn9ppdOhu3^y9%ZcX^b!em+putJ-@q^+eDVOvI!5bd*58v9*h#{%Z( zEwSs%*%oi;QTa)eyEfp$x^+KdztzY^-}Y$SqQPkjt-*0kqij>%`WJEaH-`q;EA5ni za{^y&VyGr9v;I$mo>;a>Ms@s1mH!<^1;oh`gN$4=aY$&K{#cnBcKiH;>!M)~-x@aZ zpyepGQetE2uOC=_9GHu0r1~ty*o5~iyrr{-M!(f&nu|;yGY@ByE;D2pZF~AfuAvJ$ zb9y$!Yu(v0n7=)-(x>2gHoM%~Zdt%SAAIuEbzrApl%{{e-y4!031TIZbBB_CD9ruQ zW8sl+Xx^$ye6^?+w26ygITm=A&SCs#ZrO2xd#Ljv(8<*_TeA2m z*G7*bF%&In2G!#em*esP2}|X9V-Db+f$IkKW*a=rfNW|UnHTR#ef8<6^v_?WVzxi`QppMQ`ydLcq$zd9a1#@3qxW7;#HH&al9vXMb3uk;pmbUD|QJDQ)E`h=f~EX<`b}G0W#mwxFq}qeI=~1@rKZI$j10f z4wRze5N@5KuHe{qn!uSOSQB5w`-fPtc8wFBqw#xeCZ`N>?Z#pqL_O?YGm3m?aq)bB 
zQ{ZL%*rzb|zj|bvn4c}OKuh3s)Q|p(tZuXn={Gs%qI`w%+RhiUGvVS}4EtV84vGKGR3KvHFjQ!|)!A$AzH!V&??-BO3{XyGVMnXCtUJ;@SDl3t;F5wXaSC zwl#;yY*rfDUrPiZ9#U4V8J}YYyUcz(dtx<+iV;L9Sk-+KxWO3x>1GG=Kw~EjXkp*} z9=qGZ)&P>-5e;q=O$cBWS1;mp!0J|A!(;Tl@j?71I%SN0irN~ofM7ncg_g_io@>?Px(5uBDZQ zfXiMS=9lm-%~{)!GBBBINS8++ex=Kf(G;{I&*R!T*)eGKvr1~p8#Co2+$NEtWM^8a zURls7CcrOh(PA&v=ExbR6$c3mC%;nzWxECQnAXr_hE-mVW-^xYi@^BwWvV2Fo6zUs zx7Kv|vlpd8cW^s8cJp5sxG1S{1~8(e;^5=p`D9@w=LqWDhH?z;%ibXM(yK0wgV&UX zT~>utKX@k@U2`t?{J{0zzRDLWxX|fCTLfcADuE6@kFTytk4&tdK!2~V|Flf)O!A#A z!temCw8#6Yg3wa{MvMR#_*rOgbrk3;OBFi|r`gYvHZ2~S%YL_a$9_(YFCo*JX``e3 zOT^1z65EOt0i4z?W7DozIwNAz_@qa)(ZX&<40j>JACk%5yA=}0l_GqSM_-~S7~yRP zf;3sw^1~&5%xTKBn}q{TDO(_IkXpoDDqSfA-0?fRO?6nR-DZGMmLMUn^B1fQ0Z{f6CMw_xBqbP+ZaxXpwN_13Zmts#n!_Q&tmG ziMQT_o|T)#7a$XyoB=<3LT;&fQ}rVQJL99bVPby=wIB~mjn&W7753g6JlKPwl5biG zNfEeL;CI2+XBq}AK2a8J-g>Q9L|#4o{CmM3BjAIMF#JVt;7^?a+bW3t z_`9M!=Oc)u#ZflmjwR#f3!(p)fPYxg&wp4E{I-zotm)&|1V(DrzttljfUy_7gj{Z=nuN%d3pY+5x7{jc{EJXUoMxxkT$2K z3k+>Jfnv?i&mWSFG!E{FXnOSMIu}63WCF8~si5Pne!v1=F8{qG_w_L5$V1hg_b^;Z zm_IX4cn3?I_mbkyi3g$0rI^=$$noL$o3=RuKgKu^U#G`UaxdqVrP@wl0m@v@I;*B#RW6}hiH#^Egeb; zo^66ce1A->S!#7D+t92yON^0Uk|hmXTcqr(1{V$RLe~)_T;05q-+8vG-0Frp!u|eo z=vB=&&BLtOaO=QgAV`s)Kt~dThAUsCU^g+Y`Xc9|4h`l{p98ff-SQz2{(wB-;Y++! z)35A!Dr3qe)8{BE-kZv9R>(m>kPi3?lkl%hA+#a9`$5WPR*i>kMyiT@)apj6z7fXM0fMW}14YyJJn z1-FWWs4T~-NUUy#kmHeZ?QALatMME&_ta!st_rxM6O_+C8-LGb{HJf(2b$8Yy>gS! z20b$mgPBOJYHr4|t~bQMTan*}At_^$U5UZ&ugj(NMxYDf#J( z&WGg;1W1Cq9}s|E+GlNig}(KRD!yVS@J^AF(mU+k+=Fm@ zD*Lv>(=FEZQnnwoZFzZ>Dce@#+o)LlsmnwS@Ou41aDhm59&pO(2P-%{^o|x3%Yf|b zM2b{5OH2wg9~0F5F1&i0m*Tb(#@kXk;tQFyVK$ zx{V zkB{qTK#!Ey*B+pbk(kFcG@Wbf5&gu;9M8e~hI=h;Zbi1lV5(pr>l=}K3%=#V1$~zW z$1y_#uxlZQVO6m1h0*jB56;aMn=PmP;X-}dYcI-&YcV|s`lM(P`TsJsWKD@`1Nf`a^Hspoto&47m2!NQ_}do0|sL|Y1y z^6SQlmrg1Tb5kgd-*JAI9ZM9&kZ~1?-bN8*uoZ}=DIr||-a=Fy}lEGto4YyPK)$%Hrs5nq< zwU~c)zrKMHFLh?Y=EK-$a3Dq9RwU)<=cl-pd7RN$lKibcoj@`S6!?o1yJ?a40%r64 zrl1?P@=ud*Qjwz*V0r2HIrGhRK~wq@>9JpPz7-U#YoFFskX+=0+w=({U$26g(8qbT z#}%YoW=6=!;ch#%;@u<@;&^=-)1-SpFQ?_I8;LL`_{_>D?GTW$5^|U5xwU^0J6gwh zq&j1U0m|xb?h7T2zQMI|y3}~-M4dE;2}BU4V#BDGpY<=F9idi>oago<<3>pAiuW}i z9F4Q4ORcVT0Yv>nV5t8EjB<&2L`@#rIrv6!Fhn1_7Q7YLhAstZ=@9AeMvyM)Zjf&2 zmXhv}mQ;y>0ft79?vn2A976j2`G3|q>wVVwa6Zm_nzioP_rCTO`vzk=)oaBw72hN4 z>2vrdv8Z&kXHcy#BFP7r8%|&r44XC*c7H}i0T{_ZkypPibb@T3&yuhgjYLDQB`7~0 zzX5lu`+gwVA!nfPPZ$s^A4MTVU%)Jjg3XimZ4H5ba7E!4ye^O4;(;Zq@D z>~Zg9N#-X{$_|l}Q}iTe+IQHwKNWaw_rR};2R-GSgMxi7d!2hz*JSL0Pf@C28IZKD;-GNg(Jew^DrE!5+_sR z0Ev}UEwh2hD`2qPf|?&xrCjpGPI|TRb$qwf8*HfiWAW(1cBZrCT9Dw( zy1Lk7r02IUczFC-nYkjRQ8ev0PH3_Da4kjm71pZNFIgr`oH67l>fShXO1}gmZcS+C ziWxpKXs4+RAAh~l-$gU_Hc&>TCujDI0}bbEx3^jBWigYiso1#rrSm`NC8h8d6?Nyd z>DY5NaazmHUcsD9Cb{F2=N~808eE)H?MoiXeefM)BNQ~F_&S1ETbTvn^wtF5p9Mm192;(q6rCX9)mDi4somEIOMT^EI?p- zYyV$jpqa`2ceSm*bo=UELn?N`x!7h6+0}1ws>}_%vqf#9$&1F%48Z>&-uU8cM}eho zF@3H(%;=>z$1)?}cu+p~{xwk0!E7wZ$pI0BA;PmDwW0{aw0%td=EiPp86MQOQh8KG z90LWl!=20FZNWa4HV8Z0>c(*I1hae$C^zaHdFE{UmPtR6ZVPg2tTD)fu1iDjopGli zmja{2TAL+C^YfRZghP4-;|3uPcEQ_qM7SDX(3DBw($mHr_JS&EH1TryX$48RC65Au zWfi87{?SrB=FuQrrCMWVxE2=_STe54XIT|CK#r0Fxg5$BJ(X7=daxGi6ahRJUaHIH zd0z1~ZCKzdp=pXR&VD?0Wn^X_#6;B8-8MitKg;*Gdy$vljP_l%!>hQxE*B?3EQUp= z!%sdgF6>i=IHXsObdXxp4wcj=k?*?xRm^P>UZ&3JCp~xaM)@tvO2^lSIa79u>B9{k z_A?7vTG@`@by%zcYrl+xw{M>T-zPvsB*5UtyBOvx$|AmNj=7UrtR$FH>9f z+te=hxbvPL0FNY~04Xy+DRKYDb|C=*IPH2b!17Wc4GwL82@S!buxf&v{dv_z!`fq6 zttKy4VDsiDeNzW;#U;oPtg48gcD;Ok^^RZ$O2B^&X*SJvG!R>LD+y#-t4T+_gdjBP zpiF$8>LdOjRn=kj4szOtE0WyE^{fl{)?M^e1HL%HaY3;o^?*i$Te3Us4Jk-V2b1gTh&e(HDCs4s7;KKl}PEMVk%fhCGPVZ$YZFr{} 
z#W7*7M|isaYn*l-Vo53dy)G-_KjxGR1vX&^`g-H#6t_gNo`>O=8k(`7w;qNq}kw#`ni1q7jeZS?@TFG1zZFxc$_hHtx4q%6<%d43@9jl1`*^#m%Dkh z;{oz}zrd}vuSO|kA7v*2702c6LYKSB0ta40+)WNLz5vDD)~B0<{O1SpqtoXx@%XxU z#eC12k^KXb`2^A=gg#8I$%n7aE55DMCEec8&m$i~+f z8O=SdznM_haz687-!uuVk!5FT(08M1OqlF<$4W#=Ds557Jw%Q{ydu0Deoqt2JlswKUduuuE8Sr=6FP$R+@ z(?{wva#~hK#eI?ps;e^A^QT>G6TDW|I|IvKn5QBIuC3r8jaua9nJj$V-^oe4c{$Ia z4cB^@p2g6GSp-qbNd~trO%pZhiO*iTe{edA)V1V9j` zqA*oDj_@^q;lNlS`?*?x$#TVH*vG{!-X$om~*AX zG*4?^a-x!a%kG#KBB!~6{lAf-;G0%E=ihLj+Dn1pjUJ!7YVZLv>DFwpoM#=E0W&4V zu@;{yAWhm-lqo7b-GHO%yzr&H%8I$QAqnfwqtQq*rER{L8p&f^KEtU^bwDBfKye`8 zPkPG3z`}j#O@@%v==E79`puqD>`LBV-~i`U)db^Z(_K;xJ$7Qvt4w+svd0fKTU&=L zQcHp%mvp^90Kd}P6fYegw%=f&)X~ z)HC(JD}@-U{HVn7&?pV(XIa-?jU$^<+|~rY>+l0Kby~9GjA1>l1|gf(v%meUFVo*3 zCP@cTfOJo0waQfeFJy!@O`p6oMuIN8_~%epZUW8QhJNZht^4Tw#{AIyAz35}4F>ZK zEHiTLwewqVvaeLixgG=&!ifd7&T*N%uC>5$=>cWNPK$wAmM&iRQPHVwK}7M#CIU+D zoYvMGn(HM~>}YmK;-!>+qY7}c?dfU-Tg}kSj4V04G#9Iv7Z_f%uJzO*v$r{|`7?{K znRUtiD5NGf`=(lFaOSTTuZ|Oh%%Dx)Q!%?>AG9bDUB`^wg0`3o-xV0FbtHS%b|{^# zW||qP-?^F~W@hepQ+JZHJ$<(;N(c>LaI)W>p^UHx zV7Vr|;8yRB^q&Vy?Be;NYR>eh2?=d7CSnoiY6`i_ zzfEc>!(~$_e^>g4Hp0#-@T2Yw(8~eQKXJ35>c~sB z9Ci56$3OWUykfFF!0p@60V`KEe=}Mle#A2PPDqaZ*zBt9lMLbI>jPw6F$AXrV{ifX zO*tYwjrtBRz|I9ya_5zGCI0cpN^UyIUI=y1w&*ZUUh5}xC0hM&=FmD~{Ssx};a&Yo z1P&AX6R75pp&~Eh9O4pWxn^Xv^6KMSxV72Fh#8+JF>4c0fSZZr6tB-lW(o`wqO$`;XdEQFLZ z=^>uIPngy>r;sr+;|Nf)2P)AuxH`i@GQtD_9eEggb8HDL70(#A)X*+oO0wydMAx4i z=RWQzu4ZO=Awe&(DR3y&|GQ7$>FIHIaj^ zrL*sycJ|`?xzQ(z(NIPZRX?+H33iNk#mUuzUGPc4&J0h(5TVHC*Yxa$?zYQ{x#pb*cZ^prT`|=y%?a9ekE2g_$>6^_=nHo}ra^ z3bc0o`e9tcRQZM#vJyun_S+{<_o4xwH|=76LoM-dO^q?wyfsS$4-6bDeMH{WFG8(= zXK8ZLfMmK`)c-XDTp-OkwFlg2lZ zeUR`sGLyNSy#IWX9Axrvp`+VQusO>4&r^)^=0t_`Kj>NfpU_H{xmw+cg6cwj8kN90 z{F!ByJNPAuNO$*_iSxO+SQ6=tJBIdnVv?7U;J1bjdYSQrhF(^U#=#MU>oJ2FeFxWd z)9yzl){fA6xDbocx>OurcCchqW)huqT!M#O@HJKrH17tS5sI*5g>RUCP2v{(4u(4J znL?Niln|aE{#FNBY3edltxf-S+a$IxK(vzrhp3C^`mrkSwR6~M)Bu=lphesdp@AgI`i(2 zX1duG(>l>XjD>71hFXKRBIroh9(cqYz$=yL2I#9|rEfA3|%zO~#(1=q8Ll zd(ef|p#;@<#OH-JY_Ys0UxnET^FW0GQp|khe}pcJca(@E;Lz#w%ZVxjK5wZTU4#OU zgZ@gu_q{z8#gsrgHQhwqbB(d_ZWHv^avd6o0UmXf8`vZ{Sd33?4He{tx2}f z0*8i7RF+Ros5goD22l2JTV+CiLJ77V?a@?LDUw`f=2B7nY3uje9BzG5i-}88>&EaV z5f5*Ke^eXk7vIN7F^5@Wzud62{#fr*Ik<-HYa#jT&r6e5U$yLes?WMh;75+C;`1=p zQN0>S|#7bHrXp z`z0`Hl4hm*9`xqY%+_abE*FBXUoI>{TGCy&nBwDAHMR$h>o6GgPJcCUeA7nV5++MD zsdcLajBNwj>;~3DJM7TW9c{zwso6;aMP_4>)1Q)iVC*K02p%kL+VAvXrqU;U`Pb8B zlB<}H29GK$jE1lKoY}LGO`pC(0^)d8K|lw5c)G!FD9!j!-17ZT+)5CAYOFN*wSt0* z`WRYP>qL#FLE9@M@je2(VksSIO>!nA7_K`K^G|7QDa~fZ_zug*S~)@boc3JsZIKJk6;Pjo?L_zcEm$`n?hf~3zL*O{H4<`V#`9k zTZuTZQ?ZE??C)_GQvByr-l^N$)-s}3X}%)6Xhtww*s$SbpXB*hgNcYJVivOjYnw9@ z6Z77m+xjTIJ5E~nx$Bro(ifOn6$5%Q-ZU)avk!o?YM`~{Fx~a$H!+#7a?ed3$Jeyd z-F*=d4h#G-BILB25%|}i1V+W;gwtp)LxWX2q-p}b@!X{u+7*E`m?vfVR5G`2A?$QFfvoyeZqQWr?~tWYaxXqavP+Y?0o1GX{j>- z*XB+5>_2KX_hmSQmd$eE0^9T@EOt)YX?6Oj?ldmTp1^uV zG!t4WtIjFR8TfEG}C(vl zyIx}mIruj0rp7lf+c@TNIPznMu@pkNGD>6A0@i_ z{Jq4_#eJ1MkwS)`KrKKe!$)hrWYH(*O&aWoF=KxI^hx)-!9y+q{`ig0&y{;c3U(}R5_5M&IWo2Sh$?X`gvQJEJvhtyslU}PY$?p&MfiA+Zy$F0a5#i;d+M^rV zcFo(5Y9rlagSrD*uGus3m~AODYz=YD8S|)Ox6Z1XJ@SH-A-D3>7A%&88i3jW3>7l} zs<7{?cQjB*Y}3vUf2@yA`~ug)tMuz9N5%YnH*Hli@FZTcki1Lmn^kckjzI_)zU@lH? 
zXM8Y6x0_MNl{-iYzirSgx2g)M64s`aZtr%PS^R!1xsOE$9CT z>Dz?Ok||gtL#@A{AfT(MX<<_Q?Ni8dAz~G8%PJZ?B#5jNx|DJ_QCK{fSoG=@G+3u* znH|ddpqMIkvyYTAaL()#A3s?6JO#;%p~Ch5WEa^dU#j>~BX7i3CC^MFVD?GbDzpAn znZ7qnip{}lS=txVtvYuTgzpVR_hNb5s~?uq6{*tAp(k(4af0i$&Gf*y{L$6qv8#Y- z(GFZLNu#N#6I9?hri2d0$(ScBny%MB4~JG!NP3?OyiUw& zpZs#i4|&`<9m9k_l=oks-H2aY)-67>9T)BkD3$|zUM$&|iDfA4F5y2Q|s#=vk(6vFLso}E&oh{KWHJ5@sF|vWyv&*N#t^; zqrgDlD8tcV3;WZdBluHMI6G@$PU^0hIC=>}Gy|e^&Mv0o_%Z+AOUv-dC)x4K{?Y%0 zM24D#xAKq$1~?Rq9;98s)+tg&lrQsEJCwb$2@v!~Jd);UqF0uzrW!@3iEb)q23R$) zsLpJN3ovm4t6-$&7?9z>UmoShSl3D4w;6DFIKk}UwxkBrc+!yJF2lirMSt4ti z!80of{kVRAik)e;Ps4u0Mk{g?l)kU=qIyRN1Y{|)^|<>?vK8V}-+oe-TjU~Wl14-D z>$-LM*A|6J%`|n?m7ww!JLWOs?TM-VwlTr@n@wsvKfQZuZ|*J&wVve4s3%@kfpLK& zftY!E&)98z>zLnuO0szRU7zBwfAxR=BuGALs+-0+uS zG1rX`K{{U5vu!#tKRgE_MAO^IzxCUaMWOODVm=3 z4lRYY4xi%o9B$o;Iv%6cOMG|Y{v1@b;S>JjJ6GR-RW0vueM~T3-#_i3R(X@Pyant{ z-aDg!9eEj5?}&6(`K--g)vBfUx*a{X&j9xPo@@y-re5ya&-xJrYjB6Ip60!d_E}>y$9G}v3~sibhB)g~*jnhWzz9gFFtKA8#3L@DZsZj0CtUHW+TMSF-JVzu$` zh6dUW7w;}-!UKx7S4-#*P*EQJLww`1KFa;9|DBVG6@n2?3dh&`MS!7l7HM0PC_NVf zH!isp4Lqn})61DnE8&<`!cz&0L}&{LDQB7*|0?y!f+9^P3vfm zHkoa|Z!uZzcg|{(s{so$N=$}}#Q#3#6ANR@TtFPJtfB`_jC~0r84Fo|S>}Z`Lp|?K z)_V?2OE_ltJwyqxbqN0i*VZ)iFRnCaW2F}H7rp04l?tZKc1vOQK0Vgn23sGsDQ-mY zIcD*D*T=6#^E)aH3anKOQ-bRPr4sb&vnp-YcOZ|N7B7} zJ$Cko>dmJ*9Rg&PDR^Tk%lj%%F0cI-YB(1$}W0ezGgO zcU^i@>ItaMNeWW?+z-0~lvCV?{r5nIX(ERarG_<9I-+bgEDEZ8s%@e=cZ96i?0LGj zcR}hUj$zw?gpN@+9~r{$I|^}6*R7ZBIhlfL-YNf@AY4!tO)M@~*@y)g zmi}Y`y#81dA|?Lj5Jauobsy@zyvsz@pW>FszX~6iB*Rck*H=Ss-1B}u9o>&Tloim0 z@J{pdqOR0Aqm+@8PmF=XQIhXqJD0!?4c;`zv)~EN6G@DJTf@b+^sN&<<0tK&Q}MvC zyaGOSmNL2+*KQ?Jl1hAo&AQBq298d*OB0QdGZ!m@{DI5TZ?u|({j*|N-(3jAu}M>a zb`+VAX67G2;vz;e#%03~G;=%#f4ppZd5Av$|tPYB!gnVji zAPC9x9I_ye4QdA4Z6Ra^Z@~SAH50)pp0&WlpH3YQ&;PzjQD?$7ga>oJ#NO%L3*4Q( zhZtPKRjG?Cw5-D~_?x=K;( zljJTIrH6@l?1Lsg&1R8LKHo`ewzPkm!5AJ3zb1Oh9hy_7eRh!%bfkp;H-p4$-nw;gz>Fch`b}Rd86r5xJ6O^Kq(pIfvA3he2Ue>O4+e9KQ40Y-J-HO8is5R%(zRrQyeckM*Ve%PTQ4;m zL2K3Li=B|B-guZE!W`O7^;!$=8Uns+7o)C` zO^tU)=hV~vDf)Qj8Eo9e3q99LywJ5gx2*g93|{`=<$Ov;WU>O{EEPESfN`K5!9gQ_ zjc*rI!6eULx((V|#kWIVRH3Vx8=NDIzOW0y&gr5)?3Ez+pJF75vRz01f{WBWo@?u$ zs;eahxNt%_&7b~1I`v|Wv){Lw1@VSq-3coSUco{;lm#ajsb% zg}1(m6Od9pr$L_fi`IS@J5#`lScoXn$K+xK4z_-d5q)|y>;jGe7(ZBpIxq!4euOki z#|!hA^qAEXhu-4SDK~#wJg4T;I`GXh+3S|S*H$g{EjmJ>j}3XWH_VIq{JVgZo*_ml)|3@7%hPm1rD-p}GC%vBUz zm6F#?Cxy9|^KU0r)yatj(wM-J=pknXlbnKa|K`_ca~=!N@*=%YbS7|0Z4jZvZfhVa z2T^xO+j^2w%d+-#OSzLAwwA;n(aVs;<&0c%#dN6HF^u3{ntkG6{GJ>jU{jZ4(vP@O zX;5mRtf_|KeeWBq)bJTN;oX)7g1<52m((KPArv#4vaQ$bdA?iZiyYsdfu7fmO^Q)q z!Br2xPxAN=m#7-UKvee%K0(&hFK8q>LPOoR;RTHZ*ugv^Da%+^CDw#rM*x#g;i_;;T43)|Cn>77}B;XRjg+Ka-{y1-{7H4bk=WOt1|w{(z;0v@WsH-kq4xnfsTy)WO)~0zjn!I? 
ztFR<-^lw=vqGysPdcE_9vR^<6lECiZOYX&=>PG1dUp}%?rC5bEtY$A%R;4I2zDHG#FsCUDEzCX$vq`GD-IhI5m*xE7Lz_>)G8{{nlmx%>| z=ksa)TjPTMrsOlKhv+nReL7 zDi=4kY{BYz9-ax(es_5B+4Z5NWxY#ncfv{y)U@{>N1(iy?WYYpL2b1VA&O{4PF>JdSOg_E|}pkWl{Rs`65_=yr+1x}OfrTW04iRyM{0F-h zTV#mu_3WeqBbsA@f3})JA+`na^r&da<`&Xa0B#u7opLPCHSxk9U#;{~q>@; z5*qvX?$}LcJASDfg%5_I+-E_{#%GdAh7nv||Jx*IgTydi6~mwJIy~N7FHQWnG*MR4 zQ<~u{<$wONrI-zloHyNw`O` zbI#y+uSG_9LyFBDRz&WS5W+pHwWzbsbs$2i z)MQ2eWzhNmEt7CJFl5sGa(g7`vE_XcraJL*I^>;$I2H)rWbW`e*QpElfqN|lnns59LDs^NFiJ-%5{?R@g4c)F|QD$2m2`p-)PW>ikKYzBo7 zBzOV&QhTv+ik=f`o&FRv@`~S^wnW#e7*gsVd4H`C;xQSZpH3|CW^r)CpO^fw(Q8@R zP#3fG7QfB(tiw<-tGR936sIDZ<+}K1+hU?W*<`!NZFlL0{Eme|(0c>U5-_$fNPp9#`>W3@|`gbd0OK z)ZWs zK+i0*omz6wCCB}Uhg2!|HmsPat1m0xMTC74nmd*+oS{{P$05$@-PKOp)`2F*+|$=7lHQkR zS~C!}L!dTZoQJ<{ML#Q<>V(v=r-l}WQ!rHACn1sZo=sUc2{Kc7rA+buDsV3Ocg19I zhujKaBFY!&i`O90WShsT=Ie-jeokA)t1x?$Z$@VSM*{L8KUjs{jk%ZZ5W zbN7-Y2fY4-&jiibu)$vMEverL8NIM(5O16Tk4MtLlKVZpzT7gjJM z+u>m3po#P#9hc&f8`e5@mS7M}7BH$3yE%TGYh=l?TkVe~xr zD8dbabuZTMQJ~x-FoBRJ!_Lf0$Zr!?QccZ}*ED+q?Jy>ujxJz(%cL~AW zNst70g1fuB1(yzPjq7RF-rqXsp1QSf-I_nTs-?Q#Ip^@_8G~!3fSs^oH7!lEBPTo` zLVg<&G?AW5{t}JlGwDYeuQMi_JD(moUDL$Hf$2~oJ5t*#sLd>|ehvCt`n}*%7gv+n zLC4ZG9&MJ;w{@ys3Mk!LF&ybTF#yWpZ_n18DCyDONc^+yIc<3f%7* zU$34T{GnEc!lBlz6%Cx#M|{6xyykp*ZYZE%zO}%US-$))4KM@M03FaV0Syl?&%(sr z^4VOdhqu1I{0oik#owSw=07!@hV8Hz5iS}MlY^o>hN4lMhl99rwDbz_aPRmX4mMiK zc54QykP;9ucZS!>U6SKT-d34RBPDQ>t2wghdm|uv>tR|jpS7jvi#HQ!HSwsoR&N|W0lrRn<=yNZF3~h?gt>BSmHDdt3z-WmxMm}cs7%K4^@BaZWp&W+ge*coLV_@Y zXe0SdW$hiJOJ!yq_h;AS@=aKdguq7hPAd6}N8W5FOu?23D(-t!$h zzX7m{U*E{ro8OYmHBXPtN^D+JXea`+zKXw5x+$zFCS`Wk8@}o!4Sm8z7rnvc=WMCu zEH;QItPISq*}fnunvzD9sXK)0oY_9!y7Tp974tr_g0<6IYaH5&TuC3_`bB2-tCV`! z&v=uN_BDPdv8nRL1GwSs#;AeA3iSZ5DBrUr-CyNiW4ST&Xnexno|zlFH46lHI1in0 zUCC|DR%IdpFn8R=hldU3ZcD zqYc_l-4wENXW*Hj4)DgmaHsY5Uc2^7AM<{E z44-*`^W0alM67b)?ja#2sYD4yRexCMVH8K)3LcGdqy#g^Ojs^GTXRL1Qi%~3lY>6x zo`yArtP0)FPs)FfdKckFOZk(Np&_Sr8Y_#;r;Wf}!t2QhG@Uit&vbN!x^VNe*ZZzl z^^&q}wto`(qa$1fI5vuOSKfB*x(cCu*g6D23YRv4Y3|F1P^Qsp-k#thLBO5C3%KDr zROor=vfSUz@ci+TUkj{+i>MPq5vTt$?8P5B|ln&<#4x7+y7L2kBa=0)#7_434{{J zIm(iu-^8cv7W`~T^2(iwqw2w~p#?#hs~%@5Q}s6~rrobAKAzej5Hx0wt(tuoT%Yd| z?};w|@v08UwX{Y%AfHVL1|LIylO;GFdhDRIyYV}|3&DsEtli=KK{m)p^5JA>Ww#|_ zM<}*TY@^j=5h+1wwrocrh*(egM}TvnAHn@^G2&r*?rRwe4E>awx*X?B@#(+X46Hjd zU?%6H$H~Lue=6X{YCT$L8kJk` zr08hkR-HEBgUv@WAj_FnXA?~ZjCjMF@-<~aVVF_0=GVwO)32CAJgyL?br-{I_Bt5P zg0gE<&eIF&Iy}!W8sArzkXJ$r*rumG=3aJR#rez3z$CUo^+%&2|LV0gda%TvJ>zd>w zb2^I363}8?0(c}#vlmewr6(V|ohGPz7jqsmnXN<%2IsdZkRCH(X4q&jfT=-ysWW0l z4#QKhqtsUX#3iC*y!eLC^1!s4tjYfrnu=i{BdN>{Py+HGw6%NNQltOV?YMKRf39@Z z1UWD6%$L${&z-G{DM4;_nK-Xr1K|ca)!ycgkfXHQBo$Kt7H%8WNMP^L){8fvtbUw9xCm&|qJq8H0aL@+&r}o%Wwz074Zll`ZGcWC2yks_O#r z@%8%Cn%)XgXGRUXx!tNWD*-CBwK?>xOoGXw#htD^WIS$|SNCCu&r<~kOxb`*Q)xPT z{l}iTZ2P7f3)#;zX*CvRvpU z620;6r_}1?+1>_*CUi9VNMB%KAAE`@uz+;`avNR#((Y@?*y$5DCYf~j^l)`UXe;Yz z%8VL17$(jGhmMkR3`*U0(I!yk9}hj;|!`)7P*Xi^2^WqeW0Lox1(uU0MidN1u z7E_FX;?u8hu@1N9BL-sl59@KO8OozN<#@hzy5^VmnCP$W_50_ghRY`)jFZ~Wg^&s3 zh$cH}bS1-C3AdQoIBK1714pUxSMys>D*rePoJN_)f1_sMOPce>&v}Td13_+~3ish?AyXMtjO}GEwbKNM{8+p=7*~Zo=)v zbW4X+?}Q&w1}B-1b1*&0ju-&e4|B#m@MylQBqqyMM>T*_J^+4bc+bWxmy$Q1y6#L} z`!h#kweX;RL3wriP$};F1#b%hEcihE_4ak$hhE2l^F^ zUHY<*KXJ7U1Lu<}lWMyHeS0fFININX>LltvKCpdno*;#W0s(hGir?$Hp7E=XZTHr+y9*4*!6xH%3H)MPSDwGaBlw(sprQ&KVVvZ~`j2x8*aRHW! 
z$t|6!Ol&5GuDH@a$=O|EvOMu@v@RE2(`Le>3NTqgego4&F$72f5P{kvLR)BEa$AxA zOQI%`$-PSo!jfFKqH-9(>L3#>lk}(81M4DpXuO5W0E~`BH(W3#5lXGZRaRvKq??yY!4xJn;pl zlNP2yWwBZw7Ul?vhEQf+dNXaEfdM;GiEN@NS}BGPR=I(=gizE7ADBEO{LX{23O)mEQ+^w)Nqw8r>5`wkxUf6J2NqMt*^R#y`DP%q{R_n} z*oB2xv=#3>&2yzY@s1v-aeZ41DkPRSSFDn(MkEUTHE-@Cd<3UW?ff~bJY z2*FFZKs^LxC=xD?&W`m3I$_1+3u2ZGlTT2Frrm(a(AdwE5_te>6Lt$9)`wJ?h|BaS z>;z3;$lndNeVKsH`Q$p3{X?3_*%(pPgLn1tqok7QeFW+&fJ-`=1-RP^qJ z_j?NTa^2;T8Pm6?ZM-`!yk)~P%2*`&jbS_CobK0p8+zg?UsZDn-On$s2oCPTp)q7$ zrKo-bm*F$CEKw&rAqX>&4*0{sikJ9T7CJD!2$-fwPAxV7*zSL$o>9wqlL#Ysxti7JU_XxN4{ZZB0L7K(&WXi=C7zBq}tCwoF{8>nS|A&6OK)UQk65 z5lCnC*0(>hk-GCylBpx1=9Rzy8uSm_mj(NyUP_%CDP*RZ`QEJ_FZRS#N5SjNw_!4bwErDeajO!f~-5#t?S|3@Zj@L29Vu+tbS&k zr_f#X=G?vll-7Z0xJHeQ^2R|}o=rxu$MEbe3#@$pE**xzG#5-x=r!7(8Add^aAOg_ zrWjnzE0xTiq}>SdU}FyJpZutR%kS*|!_iZx!IMT5@$8mEs^)E?>0zHWQ1(iZSS)J;O zv?|T!^G8MB21GEN0`KV?z35~|lGTwsB)y}f9TVPoe%q!U4KSZbvr^Hff)Aww8KO@SD-bTk7kNm<_24RU#9a3A4sO zOy+X;G*8YY$)Q%7qln<({lsft*W;uus~*9yJrrlt0@~NK^Ko*ou3{FLW`OJ3T{o8b zsuG&04WTAoGDdm3aOc%=*Wff1j4c#`OZ`-{U@3_Pcng#PIn0^T5F8T3cfedb=~a42 zTf&YadsGIVCm7Dg@LsxbvWA%8ziX1p%DpP3j7|9=F@FuRQ&Uj74VtE97fNs#T$J^G znS;wz@su~q3tCA7d7^?yIx2SIwgDMzRSp>IkO3R8+s8%!mosy}`4?(v(NU%M$arqlm9-TwvAnp@Uy>O*rkjgQ$RxDO1zUVOwE|Yue^NuBq zYL2!4-2>7A-TfI9+gfIf$w1h2S0 zn>0@D6`1misxxd>5#eMUBJl3bu!A8{fMDo?)usao90}U*QhRK}U^zXe7C5;|1f1d7 ziIKFmhLv!A zkuh;Qzvk#i*8V+cZ2Lj#Hrf$N?_q8;a~#Rru3yC&yPmu07>BY_gi33T87py>qbL`? zeTZ|5!F%s$&n%(boi7DPwSfZxfxm!|rWemS#JWD(J}?7DPrbto4xca?z}_n4e9y=3 z6;Zba1J}@25;eqU(~2f6REfwl z=V9Ir#1``_H|FzehiB=N=TF}yTiI>RrfO%4YjJ89;7`{FNp9{ZXV!ycC-^!`hvXY8 z5PGZ(Rg>IsP$8~|b#A^<-GnN+9+p$I7Ss9lL>~g~!bs-U4O!Pi3H;vuW3oI9(Zm_A^TFy(ZkFslE{#yw>W-Hx*P}84jg-*zF1spFxwU_bK0*vgH@v z*RN*^z7fN$xNG-nX>(I^@`#ForsX1eHd#I!qP!YjR*Gj9b;5wVFBD)ib=6lgP7f5e z>r;zDY3ubW$%9vx0I1m38>_wg-T6@7ztGFW7l7a3^V^S72cKa*$BY@7BRxM=NF#uO z>s_!;gX|PsID;{l>%foF^gGM?p8#26?Vw#XeR=htw`-lPCL^=~qP7<&C5KvoE{1}8 zIvMLldVf_q?`c^J!sGb(y_MbZsWnqEB1sB15@}EVs>CB52_hw(#{R4HKVuCOFSp-l zs&aVn!XPj~8O{39UL=@SaI=kA~^WWahK*~!Jwcfbc}02f6YGx?oCP^j3T z#Jd@HNglu;{T1r`n=`%sDh~L@qa6N;;NjRBe*0q2U#`l9I%Tp<15Z#&ECh@MKt!M< zmV>d%_FWybhIVRgkQH*Lx{0r0Lebh4SWG1VH_c@w{{{X9uF+C2@$?O2n&qHie6g7Q zI*JBE7~nY}+E=YfUHrjB;zEIzF5tOnO#4H7MVAZ!;#G8(OBJdw4<{^R?blu8HWxd# zz{z8$cum@SlZ~;ZI6%Q$*lKYpCFS!hHL{zUNNg zn_o!vO;57{qc;T-Ecm8vEj#rsrru&Kd@UQrHff0CX^XO;!m{Lz!`#IkMz!e7hYL6|#n#W)c1g zxZNGqm<8QAL;Kw9#K@|bywir#jL}7 zP3LE6k)~*O?$0cgwA@|Oi;>I{C^73rOqt$&fP3Yy)76ZiOTsismE<>eaEvo-8?BWv ziiA-f%ATeIjPvoJAKA;(Hqp_pWlic;0|vSl=t3{Q1X_%u>%d9{)63eb72cE0*V8-` z>~=V5sv~e}y+w1)G?Pxr3Srtuqg>6ikkW3%<>Gy4F45_0ws0V8AvAU#Xhr^(4}{uR z95%a@kk@2-vj8-&E;;gfxiUFSfJ;)-ceMZNM}|QIo8St z`XJ=M3rk=*;039Pj)nPe3EEC65t52PLbyfZWX}!AC)cLkT{?bz=~q6gbpqz6%Lr8; zijN6#7GSiG%S)P~=s2%vM>T>pfAE|5sow}!fnGBY5}o{+VA-X6*E~%SSbAemTyGQ! 
zHCp}a$${ck*p9yGd=NMErk9EiW4C1?mq)Jg{v2|GcCld4>XhPOetbik;jJNMj2c5A zf7tmEEG+uY#;$xqP@E9EY^!GQ5GnK6B5G%KuC0<&@sEkT-5BG1pl(F~N+p%zrMdJj zO*9KG8e7@vbJL-nHnfDBt#^M{LjtI79p|!>1Pgi5G+8>}10m8eiNDDJp8Yn{x`)$k z+n(A4l%8N#YupbSrR@DhxwJ7^TuzFFpYT=sRQtIv64NX zhMPscRM6_}g*PaaxI#}4^SPtdui&gJyer-$7uw4I1?whtG>2uXY1*bQ632dkCY7s^ zKmLZ;DS*2;b;tUdj7gc_Kg788_wfF0D^A;6_|01Zu*x(Y6y|6J2${{hzC?nT5NseZSf+?qJ8ppUlvDVJn-|^O8tk&1JoE$PD z>U0Zb*0Y*H_i0<0`cF@+_3Ksoqf`I{e=jGC-KBgLO=un^2(@0$-x4#Y!jGXR0J)v% zcYu$V%u0d><${gSDpRobb1wEXsMubz9D9qcopH0<`rH76BJ?v&VUpmR1zqIGcQ9r5lL<$WI+ zT^WC0QA(<3=|EkFvCoznY;UeiKlQ#yt)Y%ay7T2?*5olnr{Yrp!Mpdg%bM0}fu2qE zxIE~C!HyUE;*rPUsVR#I^$PRNzbtO^Z}V=Q->uP4 zkirQH%hI%7A%LHVIGd=8)RZRUkxJlVO1bq-Z>GeW-8^A$x{{v|-*IELNxwGp`7Xu{ z%~5hk`w>L8KyB=q(*G#4BSy~^+i0e)%Ln)n-Es0l@k`>sqd|Pw1bP?J)7SualV?QI z9XI_2ik}Wj9Ny*Zl{=h*G!-PC$fVH(6wzm7aDwrmFl+@HL)D8-0-ehNUdifS zcY6H`+O<9|CFde;zM?FzrF%z!;JaK^6_-dk0?etaa(7Kb^QWVf6aF@IXsZ)$&R^S= zZ8uQH|7GGOpA*Ad0(_&EH;)W983r6&uyG@oivBW*B!Mts4-2ME*MHR2xl0244?T(^x-%H8XR2gGi-V$eO1~;uEdD>*=rea$o#lt zW}U^6PVXD6yEI~?k6zzu&WA=;nPJGI{Z6|;81&bXY@+`>Z)qNTU@Oyg*VJTRR(rUF zcqzw1dkX*bhcGsv%DqE;^0DG#{ifn|X)lTa2A*X)|!+s&>HA zo8T0P*LjDM0%6@r3yI(lQ>MH5&~rx9b%o^;SNps5f>O3jTSllG!9y`Hqt%a5qugB! zUfj$320l%QwTu;=vY!d6s9`KSSW%xVe77t8@|1E@U9M}XK3*;A9Zd$K!N0exI+BfK zc)3t%T?r^_eRK@OKOX=A5yLOl)I8K7G1#d%W{B>E^EK_%~5^2FiT)o8jGWyKMhP-z1XiGCG_qtpB- zW$-Azc^kr$md`3We}g^gj^@jP`AyBZm9Bu=Uo{}L#4$3M;#dYS{^2kI%R}%2rC*$5 z{Pv5X84?`3Mq8G`O%6OpVe1cZ0D+^V|B>p)M;ItkvJD%=@{hnm19cVS86H8)O1;Ln zx8L8t>%w+_X^GDcu3VxE#veuu>p_K zwXBbis&Jvn^^g&bC9C4#?FFxYgsg; z)d13zEe`1+6f|LiahA95@?{1b89OS}6UCk0>HSsQ$U>n%hzmA}6D^Dz8H{f0Wz!d2 zs8er4=p_O9!Ckh<+FplAkr}2^=w~M!@CCpnsD!@#;3 zrdc1HvN_ef%OW>kYBhy=pgXF|tx%BUVq(J%)1QxI^p1gx8yc>FQ%+G40YS7;l?H}O zO7dClOLd_xK2JlZC9cT7zp)D$5{yg+*%#t|$9R=poC7w&d~{!%^?s7`U6@lOD=gtw zzU*#vKKkB(M*UO%#?7=@9OBuju4%o^Ps%^EscvEDQmQ58!Y>lC zZ%2}!rmo`wqoSL?kzbQn%l0V29T_L1$Sj{WS_4!px#~FbZ6}Tk{@|`VhmB>`0Qv)v z)BBip?52zZ&W{5sqg!joSj6(x${vl7T;a^zLM4JG3>dXD4GWJ@tGkZKo@wiBZnJI^xGJ;UEXHnVJ{M4(W(gA$I9X3D4&BnaUxW7G z21ks;xE7rA+p4_`X4?DbYs-Hp+uGLKjN$9ld)3wLYI?z92x(0WAU;mrk=Ra67E=o3 z?cmh0H~VY&ZD*WBevBN%;4E9P?-ir%52m+v)a|95|9?+R-%OGbphmx@rSRM4llcIU z!(R%G^-ZrrPJRRXT7df3VgAVSQZE`G55OYBisznHFuB(@&^JR%xU;hLY|#DN3=`ig z%SK+MK$7)rMYbbV@yjKihB~2Qk02m35vw~=g2C04P!DF(HQI`>al(;-Hpbgu+$5Ka zhlOSrz#LUwVnn8;>TQTS~G(2Dvsu6z_issgVONVimn+gE^y|PtX_Yom~yLz_C+% z1OOy_Q-SRtSdqaj*avvSPK1q7O?Eort#0WT0zmR{?J-< zj58nAs;$%y&lY7d@LW}%!d|P90n~FLf8GzHK%e>;8ye*@8>ylP_dN<{MBSt*64f$C zi6uHVB1MTLN;=}4b7x0ckrf%AlgYbmTB3~}w$|VE@=g!WiK!9WPd%!24C6j|y%iJ| zcLZH<3Wk>>jk-O{Bvn}1mO;n1B$hJh<*E$>Ixu;NjT3YduK4jwlQ|-!9c1KllbvW2 zmr^yD8ca)}sOND7Yl9g(!ZR7*eK^Pbwa2e1$w(KNh{mIYz!v%jPh=W#+jEJ?8;Pq( z`x|t>2Hgr@Lugw_p^$FRoX*8pJ8LZavRU%#B8v}=RsH~sq8nsGt?vH=Huis6-45@E zpMz&wRM_#sox@C*k1dRw@!W%bKDgh@7FNAd$yPc6T4)vlk>(WTakpYlM&TY2!dZ#9Pp%JJy+ng}#Ldkxi4hF=?BcS{>59FzFPf7Myja;_dO03<6ht^psc&hsKD zWa!kK5{fQ~wBAf95#Um+G)Od0Z9j6Q3!!|saalhWWP+b1{dO@9J5@sG*e6o z;1tZ`Dx3az@JZu9-UL|p>I=&_j=;Q2-SM4<(~n1~ic*qpDji99Up5ebxgnGZ8qY8fW3u(OR94~bb8t{WT^g%3;)98hgc z>l2knrkLA4h*b1Mco4>!1LyGC{Nvpf+Jr9rGT}>$?JYz|Nik`aNP=BN$QR^DX5>i! 
z%=#uRYv|Z4$)-&6;mnr1kDZg(C8la`JZ3>5Vtn3mP)-|n+@rC;?L%j8Wd5fS)GG4E zIfHiFdW))9Le}-Q=ahzffcBI&HoXpGg9)kJFJwXFnHiEnS%Z_#Xo1mEGJuA*Ky?fb z4e>eA))%G=?w$_)mAk(i!_NT==>NfpJZ-uk7tk~R44!;8s@{@YNDNP#>fES^fU}wL zU&bo^G&X?lS~1Ow&!j62!&@ zsh`zf{+!@Qq$MxVYH-wws#zYVC0!W5$xy8Nd8NkmE4kkbo~|d9GhYpl#Sb_^Ck|sA z1&X~Jq;p%<4q%a!r(;2o7hFQ;}&= zf1Uki6K?sbr>(%Pl>&QV_s$%(Fhn+G@mFf<(zIG>Ibr!4)BIS1Z#XLikGZ9ZE+J@SIQc zkKhECa$wn!ESz7o>nJQ}>1a^f%{i}C(hiN1)Cb%p&8Pp>{8i&aWeOKynW`h*DSoya z-%{Q9ePH$D$x;5Tl3fJ|vVm}M!7$=fE}4C*8EnId{49LOh9MS{y5SiS;JMsGP7-;? z<659q+CDvNqgPg%1v3%>)V034!YY5ep=W8qVdNQ;htdflbF0hqV^`1fx| z^_#@PqGI3fo|_e8T;FR|dr6nT8}Y*dWC8?c=KvzyYYrH3xHFfn?W^~ffU?v^=DD2a zl}Ua~oCbgDEpF#w-N`?oj1qy|81fU=u>BOKrh*e}-*%$ia(*y^ zw+9<6s@YhjW7GXeKyk_x;oP^c3lnB9!(=GaaTy5@f@f;brcR&tEzTJ#C|1)74ylO5 z{wT%F$SPy(08qR5M8kBbikfukh{}@ShBxQ+-KXeGDYm;0qGtc?LIpFOr_aH7W_P2t zWk5r;sltSSXIqg9kAOyQuO4v+9Q8biNb ztHZtF8o>{?p&i4+f zZLOGZ?%N>xhp8N=B4hr4LfNJpuVtdwXZV`?^A(9Ln!C`f43$;A**6PZ?*D|cSpol- zQEh+%Eq8D{#*?22Q&+_WoG^esQ>@iECb-((?KrR~^8t4mu5Bi)@3Lowo$^zT#6Jn= z_2QsnaR3QK2C6&zd>BA~dEG`lua(-Ru1Oo#nNpObm6UdpIKCTFy=kl*lF;>< zVWi&7q2f>`fPcsS7fQcczx}8H4~2dcOC=(v`I9ANe(K5inLTe4D@_F7KRGug&0q27 z?dYBMNmxGY8uCuEt9Q677}8i1E1jZ#DE9@kEHP$sG{M6FCr-~2lxA-S?Ck%@(#?hd zmeoulmmI0D^TwrCIw=A|xyV|Z=JCji2*)U912Zh!?k1-s~ymLOm%e_a7eziMEb9Gjse=8(0q7)pkbeW z(k9yX`4r}WI@;K(A5JE9+lw*lxV26VPeql8iPFGP+;hGh^R~D?qju+L>{=#%_)igX zB=)n`BumB5E(T7P*@J*_W6o?)4l^IiM4ODn8wH9pL2=Ptua3p~Wt-iR6eCMua_)_y zeTD7;v<5#vW!wVw#o?#@%MkoIb(j+?NlC|ZG_b&n(`mIZe{H{H`z-|48$M^H9An*P z)fq|Rl-|v$+vvgZbigl=v9QfQJ9KK~gsJXfd8ojR<|4xKFi)lmICi@$rBr9OJEhw? zZX60&o@PVqpx^PQ&KVc{2JBk5$f%)bhDLd(?>hibe%V$bY|QGwdcp%p4nBY6yWN@J z2W@3C*A%ils2TLXGXS)G&aueB*p8VGRI+{knj?TMiY&a#dmCbgPYz=X>Rm7_Q9XhS zEA}Z?mmZOszcw-IHGMMColEi{sqGK1<;YvYMpf4@Y4|jZFujuiUepotsczf9jdkE) z`N|1?n5HOWT~8(2w0TtCz(hC#N5|gckBZg^!pAN@>f)|)3XD#Ea@M0tpEscEN?dW4 zWria=r;`8F_n|KWE6?^%8%o%1;cfQ8nb2^FI@oq2;TlO0ZjSTGoqJI7Gv$S&sx2LB zVCsQAZ!++Qxl>tPR*~^$HFmwj9i2|CQ*@mxSZg8TDuYHSu+Ap}JhpB`a)jnyw>AJJ;+-T1S}RO2W< z*=;+sHH+SooawP4;$iRXuxe1}&FC*UB``ZPcz(U0k2Rs;bLT}E$m-jrF^k4h)kr4( zh%w>XH0Vx_vb(8iYN2SWU!O-(BD#CqU#I<up1IgD(JVadV)KIcEeG1M=z@CTH8L&*#}!Ns_4U3# zAwR7HaL@Htc9BJp{j5=zLA7?4Z_yZYp64#Qm9xD#&8TLEFJo$9xu$wh{g5ZU6Kdy8 zdFEsw8@Qkq`Of_`Pb2^EELnnc!b_h@zZXgSNuzcdqPq;#%cKK(M|Ppfx(*NIznFV( z0=H(c4na4?iVc0rQ@?F~lN#|Y~b*qDj;Y|q;Y}^0SlaA zy6|xWPd@HwxG#;_#FJ?SiTa;oBL3b!hQHm{%L#n&hA7CfvIfC&pc|dcSsH~a@)BNL zYBu;e9Iwsx9u^w%_Zf1c+n2-p_ZjK;+uG7}KEAzpN!{IDKiO*y4#moGg@QTh4o8G; zV+!-lU#_!Ux~0tlI4m9;B6-~0QuN-H(yMJkicfdefYtP_yhx|92la~)P4`2=eVF+i zJiu@ZO_ZIdFKYslpt1m#AgT zxzPvoC2t@Pw(YqrGJjUOFJEWwY;EZM%m8L5o`1%PysKMzAG`f`{i%K`4&wlZ#a#C+ zci6GS%IMicB+R7K3FXVUVrz@hG=^~kNm~#TLX@O0zXM5FLIV-4vDD7!m)XbX-O?D- z6gBML@h|L(By6@!0};KN@AsYbE%z#9DQ2^J<7 zngjYUL_&nn12d{ljDpXtE`w!u_|XbU~FLu-vL1`~cIFJ=7*DqVZEks5aN0JUe zhh`y6b*07Zn@OfPdY<)7CrR8Zx3XdhZI?{X-xMd}sAr+TqQf#~&<|(K57vR6NoM#s zNRu}%buqi%_CTKqq%djA<5joZ;d?|oZ9*y#53O)+T30=xWuUcQz@5agRW@73d%SU1 z*gdYVg>#eyr?Ze?-hI<)y6($X6LoU3}tS!I9)HapJArg%c-{pOO;<}@jS_kdekDtIYP#d_D zG7>k!)4Qi+>f^Nho|+X!&bRnB!F1PzC$BfJs8D#lBbT(ld;!B?)y9aE`?hio34gZ9 z9Z-4~4C#{fJXC%s7^`5>cxW%cAaF(4m;BUEy|dScW%!Kf`4k`PN8$Ul-tPCciJMuc z?7Nac&WW2s!;J0tV&;j%^_ZuWxhEc|n=P%HpC{!)ws;}~+jk`daqU}mcA!YZ)msl? 
z!Vn`uXTUckVl>0GR1@L_+`<>I=**F2p#~n>vHE+Lj?2D@-VHArx5wD})5Y`?uNz23 z&Cm6h`-SO-j(f%PppQub@$qr~=*Q&mNc(PmP`@D{p;oBMNM655?*pSHD6Wh@(L|J^}`>hr8O5uP$1(6pWKiC5D>Fz7Vx{REG@ zPw6j6){W|Ip!ukgkV%Rsa4jwgtW)mG?#ME6=hcG;*qkMm&R-1`>D03eMt>MDFm>_bnE=N4<5(T@~Gjl zn+AqWu{)6;<(ny76yVFqOQJcI{?Al z$(bF>y+|w_o((PkgCw`NG^VxRt-S;kM@1gm)ra`lv3;}$#^$@8_uc!c&nLc?LWw*g{pRVSjO0=@?amx)w32MVef z9`;Dzti|v!fngNu0bJK<92kmNUh?1L?|x9DO7R?WZcXjYA=;TlD!_%&9kY36QGv$(gpQyz!IfOH>bfj2`5PT;dlfGg2l(V~oXYJf? zYyaKvI0%_SuN%iP2LRFOy;Zi^6#nnk+32uw=iaQwK?wti@NgAQxEw{SKZEW5#Qun%c0O2VIxrB@vW33#6KQJSRsQs!NK5(n zyJxIF0|+k4gPaJI(ao~BsCBK0eiAwO4Wevh9|@)Tp|HF>m0lMb~L8sfV z%3Pg^ymO&96x^GKn-8fBFDq3^#lr-VRX>YW+$c0}!NA;0l4C{Q<#z2V_x~OZ1dS4p zxSxJrg+gM&aCi6mU!RIQ#(+BsYJ9kRGo6k#GGU;0M-LrpboM-SXmHW6{#`3PXi#m{ ze_oo$L_qWJ;)#M`Zrm;hZ41Kk{mn(0bsp_F8QaR?L&KHxf|9NrcxEgTTfhz~3*N_Bdu?6+!z!;J}bX=wKgt zhFv`ovD9mMvzh0CuBOXvYw$F78GFa+hEs;|RzN`OOh~BQI$_v_tV*uu{%kVKG#Kp{HQ~S_oFubSJ;ro~4d>5$RSCE;IPo}v6%5PYHR~E+!Eim%2&*UC`0cS!&|1Uw z5#@48peNucN9izB}!#ts!Qjc=PIoNsEE1)2O_drF@j~-V#@Ota9{BT zd!dmAz;s)A)jBZGzA~UQ;YJPR;4(1f6Q5=)rW#}h-UZJNU6p@>{#hr{CSstc?MpGG zE0SuKv6ZBbIrSd^1am(l_<{ZD?F){fUoQ&NPORrY_n{+XDgf$U4fw7^s6z)FK^pY~ z?#8xFe{%S}p7-8~<$rBelS9tGYvVtHMCtl}Zps$|kD*n2^eWU&REvtG#)?I_8C1}M z-Nx2iBLo>@lGb=f6o>KV6Ozlu@P#h|r9+~|aFea4UXV#UkUH{sOX@eK6YBu|$U743 zbDTs7@(IEs6AAH$`8&-$;p6$8h(K8O%--CV}&_a?Mtjyid2CKKJAL zfp_^Xlg-eCt&1_UFVH{1I`C8s;CLlUs>m@If5;?8bhso{`+eOaI^`ao;oM!$b$Zr> z`pIU!oq&659l8N&T_KOG6R#=*(!hfMU)MrsDgrcWPqcsxwPdXfN&T^}+G16J@&Avu zXhP`ZCusn9ts#%YjAEwRcCm*s0SG@u-r0yKVr|srG^s-ZAa*cYF4P?$O(N@Ybj^jf z1-K-0;*D0-gDWuH%o2%SYN2;M^X1+M^pH&uM?57d-Y(=RGR)5_qf(lv{{H&3f!1T-rLn zK90=x0Cb~bPqLs_$)%Ms2^j(D-(^PtRU!V{w{;o!rp(@2_qPK=?OyHJ2o*!6uD^su zKNxOY7uqYvwhoj}>)H9c9WY&~+#Fa>UoiV+!LN#c24Db$qq}5GyTbmie2L|Whr2*a z3IE%%1SDXgy?Yp;bw$ubyO!>sKh1LqO>HI1JUet>o?HnDxx1`r=!mbip8vBNXZ1*? zAhEh&+b>r5#YexO9w_-v1nW@E+fHXY&~ zgdw0v_GI29AgveUqOK3d+tV+Vce`m$o;xQuz_e^vfHj2F0XcP}jf0gpSv{x5!+{kQ zR~^8{?(+VQremNEQDVZW?0fI1V z_+S5Ig3`cJsn;NI(EsA>t%BkVyJk_`VITw#76K%=24`?d&;)l4?(PyacnC1KTX1)G zcMlGO`yhkvNp^kb>YTm5^H)*D9XGt|S-rY@^}52oH`N&17SkK?=z>x}yeAI6316#a zB?Wj0l(3v2KVb`#F`J`Ln%Pq{3)3n4zQYrXarD8d9gg9To$xI;ZNrCK-m%d7wR_>s zt!ehWr!lHO#OVlaf3jGAbmX7R6@LBe^SDi%T0N-3IJ|IChsMe@3(G^yT-h*%P|%8- zZt=m|fqGda#gHex)`?6F_84gcTNo}Q?_qOwF4#WXuha?Fn+NX6N3UVc>HqSICseA|ltQ%$bZ-}0?9 z={58<|F7cuZ^(Q}_n243w`>x=H>7-}StUa$F>+XSSoh;8Y~h2i%`_y@J|hi<)p)ze z9)7xGuhe%UMj;MmaZLB-Y3kEF2tIo(k@3Bo*b_eIIoN(#X|dqtPf{9d95wv-XOKVp zH%H=#+7U?1FG!fDl|P3JBRxTYVa0s^I@Wj^zy=vydy@|{+8$7+{w`08b`oTABoQLA zP;=4jS55n23F;I;eoV&LbmE0`PvEnf3s9 z>6CXGcAR-Ean30EL2|i*Y-4`hWHAa|Pmm8!QPG_`*2Le}3LP4?$?)t~NIcL@mUQ8~ z`r4p|x~3%!@m*s!4-&TVcZh1{E-$=sCXJ=z#S(2({AQf7JF7k3+zx-=e%@_C{&-Z` z`tEkDxt4AIz&W&-8$JCX*#vNweM^y}XlFi1Q#18>WK7uRt?B;6hoI(-!k?yZ^&w!l zJlM(dpGH3Ko1xc_@6DwDzMDWXJ>c_2jN+NknO6+kE!gCUO=NdcW+q*&d5@1iuTbyY z>fp42Pk{MM0Mxv2EWg^OUikdBvlTHHQ74I-0Y%0a2~4g_l7$4`)`i z?H%2;7P@Nt>_LDGK-kbj1UXDetNgbwhud4%?JXMh^|mE;R{pH;-7Xw;Wk#5p}Cq!VuF&5B<=P)Rj0@FiYoFRbAGmn zZAbb2$GF!+oV}>|l*VcWfHu9^IhSLyVsT;SlOsS?rZKx8Ez5BS3~ZltJ1$RnarsTk zj(sU=$rq!c!`s;}k}i+p=D2fU(ESm`N}S1jI6M%YNsgGonCT? 
zr2llGI3{=rYr%0fjj!z^|Gf-%*rmVq|6#TNQ&VnH{S{&BE3~&)-p-(V<1bXa(sv&6 zne!1!5pn7k455=g232sX;HJBH8y9iym;4n zN3^h<>kUCgq`u}7B#D=8|lfsPuELyam>Kr;digUbgMJF2NB6Q!h@I(b@l}TA1 zJM?Rs74mo=UqQDFJ1Bi`^A9>&uirYe?d#0KE2F!J1x_Md)uDWVeCfX+N~oSW02kyQ4et(8U_UG;8 z*ji?hOTyYo0|#W^FZG^mN_OL=C8S(k7owgJC%qJk%U^rnazdZlZybp77&~s;xZp}p zmD_Ji)+N_N#>*t1f+pyT@*c%ntniEd-G;ik8OZX`*^28QH#JrvN&rxo;up#E*)n!n zIAa!E*Wr{;K3F4Z%^SOo6FOuZ$p#`zh>$Hr>`x!Wo{Iz|M5CWc{nI0{x~_t5wvl{3{-5XqAeK)0Yt)Yvoh(TGB}e*X6??03vgz7ss{jOh+%LVTMkFql_i zDCMHz4~-K106Sxs;k~Wjj==VkQ;X%!ys@ZmAc~-D;LHjt%1R0*M3z8)7sM{-X&gcu z*k3smDT36G>+2y^7ox8z7~6bxg~ly-COjbAi)7l4(>;i!*2rA2EEiWFy3^V^iOou; zg`GNmiFF;R7F>Z_NiHQOVx9LPC_7oVCX#jpO{RydxK9w7iHWH|c>dNCVm>BUxMZ_3 z?F|sMyfN{I;)CAvC7e1u3(b7he?)DH{nz2=(NcrXcsZ?#1ehR({v=c~?du_AZ>=eA-L zUHVbmABxA>7z+gdC3=mY38fHWBPPYI1EZGm-$}#sFRn}qp1%7#oaeR_!VosOUBeGY zm@cx{+W9EJviSW+9+^Q21;^Yb`uG4xz&pF#FeP9%P8H5|-2JPr$4|i;2TX7pQ(s4o zaEtm{`N^^GX7@nj9EXn?3j-9pPQ@7GS0y-k%Wu;MR8fl2T0_U<+$Tyt!$UmPGPSF$ znc~9J9d62Ij(~R`9PO|upGO+xy6H z6oklD#Gi^^;)tYzDHvW0N~nBdjbc&hwlq&5u{^Orya?=nAIK~9BDti%f?LUBLENk~ zFbVgW5u%SUe;C498s%g>-se7P7`Ko!8@VKWJiq-oHQ_OIjc`@rJBoY&Z15)t>#aJLz;BeRwvN>^E4Wk1f3weE zncUCmiQn@F!;gpdGtwWR4;lwh_Veubcf~?`O>&mJUzEQJIQt`bR#CAlB0{~F7RS86?nd0P6lFahQf}%@!kKO3GptyOZ1-+ zFcIuSsQ00#CuNW@uQd*#-2gZFLR$f9=BMM~+11dx$2u>PBJvE9NN<|^ZWGE*!A@}` z^J^Ce%7x?;6toHVH>{`Npv3PzTpdBzf+pi$U*W-=GnaN%ApMajZyhG^!qpB6Gx$X^ z0!oyhZ(6p32TEri)E`~jX;QIOT~tjR>_)1gMt&3LSq-AATpTYCcK1yu*ymquO9-yz z@PnHmLKBhMV4wLy8i|-5?wSIU@;6$`6{{n>9*Jn1H3iF$GStd_mGZpeznYZa0#m6`Ool9$xF7p?_QujEEV{TO3Swu1lZxGi4?EbCeDJCZTu8omBIK z&uK=+Wp|-$a}fJxC+C^FTCIcupO?$|w!hwn`VVcmgE@ggoJ0X03*1S(wG*}~br4aO z*+CH{UJ1qul9et0x4>);Nm7w*#kyvk(RJ<#Nkz&k)LZHm z@$1#tzWh*jb047;SOzv}KBpo-OL{-&O}3p6>;G`p`w#R+g$J+ucKNWS5rW)2mXFcD z8VIxEgRbd*zgV>eYrUFKRctJtnhacybs;1HWfuEGC(E*4DJv4sUlL@eWN^)nX$0Q? z^!^pF&5H~n*oflCRkpC$5)|>^K>L!~KlbTBa*AprWg==j@MYf!39Nss42(sqjS#QEcaWf=-8DPjW~q^ymr2WmS;*JAeN)Aoa|aV`N7*Cmy`_6(^02 z>W%_M{rF+r2n|J?yVKoSdf#TwiM!{ih2U-Y{X59FM2f^Uolx_A(0!w&I%=YBeF+U> z3HlTMEhnS~OawPT9I-r@vTi6lcT@xNOXz&OWm#}!ba7#?=o5f?t-LOfhskL{g9j4{Np|hke#9YS(_!o{O#rkv)O48y1SkBo-gHg z(5%i!bO+Ou37#LezRUu{7s`K$yqMS}?WfCzWt`b^X(1DTe$zb@&qt5FJo>elaSJam zI_4*cS8>Fgx1TR?8-N&>n@qN?ya8w@LWtZ`?H#FW7?so~M2d1v;=4s&gaer&eAU`$ zogh2cgQdS^HqcL(J$)ejCxJ%vJxXQ3NY-mXXhHe$%bg^(94{mL{CDZ!PFqod;>U}O zprH2>wMY7>u0uVgohYQyW|-fQ-X1{+2!*7xG&h&d;C}*sb!H!Iv$kQ4);!}{y^O+@ zw~S8>zv84o2FvadabQ1|-exi-UM9sLc;)y6|)DervSq4AN<|)3Z z&X@?S8)10wy%M)Cb^-tTDg8MS*c>*OlI{Z5h975qykZygymf7JSHm5AvLB?B5yXTr zE=>JMZl3-~O?wxRkct1f2J4pMk{vJf#x0sgXiCQpv2Krl7m!kt+~~Ob_~riA>L$O< zsj>QYRWt6QGoj8oc_O~+>w!K_|DC4UVCFk$F2i_Hk?K&)E7$uSfI$YDdf#%t(XCm! z(R-)XwC932>*;$vm=J9s*cvJ}gu zgh2OYEzes&HnN8jnY*%) zRbZFam1jvemjs&VP*Nuy@Ywg3BO=>7{LD*8hLe?HJjclGZkIi+`y=yIPt`5<(=Vta{N!8t%&>r?hOYo2rh91@ zN5STI*LH`d%JoA!!xni1g10d<;;gN<>NgeMqfax66>uQwvw1|&S+4X)nC(XiXK^i> zK|6)${S>xtI`v(bMDWM?n<>=UF%n7}2_PM?D7z!OL>#dAg;Efmtzu!Jah)UHZ$+*x z^cUZ(hWyi;Jw&Dx<`$Sm^m{lKDZC5FhJ*Tr-fRX#<&lu)JJ(AdV~ayu%(bnsHFYZGOM4Gwst2B)(p$cS2NOFrsX^@p$!9;&p5?R zwP}RDm%6396{biCs%BJg?(27(t={oemEjR{dX8n92-?^}< zO1;4tB1Czu!1!Pntc+K92)fCIlzdqz0!KX%rcmBD{?THqJN<{vd zZU%BnF87!$GlQm|o=mQiB8!No;U)mP4!ud)_Fnz>V>B{qhDS^4GAvsU^5MQT@$hDq zDb_2PqE}kNnt;u&htvKPZylh@iZAGfeMd+^48fY26u@#o46@FIOVUA8zhO6C1jIaM zOE`E7#sxkn2SDX`{N~){5N2Rbp-eN~2Y3*WR$8y$u9WiOlN zdntsg*Z8|yc7+RzpFF0D{5^2c5w7Ob0c0Gb-94+E_y?u<(Vxvu*D_ch7VMJEcd}s{ zHD(SUHi-XGq8DTusvWRJItR*z9D=`WXgTbkaOJM~K8o_6bmFD;S2}Smc~7+Z=sAu# z+Hw7Si{t;5yW{kz?w9&W)>>{L%G>z+q(e$_{WL=jJgt7!^h|XAeQ5}YFT7i_;ebS? 
z>^jhrAE`mPs>nEiqYpPeM&9cDkGzdc+jDDOb#%7dyL5L)azU&orG%;4{yFWus}%NQ zMMMzn+3;h@0lYx>^;@~y^bPcU>=*jxm#T1;Y zi}rU4F7Cu(3d$*3$?_td{ocG+iFfByRInq1xY!j;d|z_$r2;QPnDJ@SEb zwM}16E4i(sS^GTG^SpsfSV`TwRduTfd>G%|U#?_XNEaeh zH&5GM>qiQJABq1t@=@~_3cF=!PilT0_s?Br?%`}lWm`m)xy0u#OnQvid9{W~)uUB{ zqcROM;Z`1rl_r_g8fm$6Mh_7(v2~(OK`wT>gbbg`9-mYt>L{`&0;cmMK37c*7YpXY zQi7Ksovj&Y-R=eL3lL-2ZLZ5rMLkwFednt`5wD{EJ)=QP>-R~(@2P6xfrPc2(L5^& z8TbAI1rKY_Y8IwvYqgj0l$GOGM*gRfQX5x+L|8H&X?u(CsgXG5(LZ%guR=aH13DW< zA-raT+;pnP_Go*h2v}!h&8#gTEBkmxOv3>ui}12p^b+rkBSMPjO3a_}3bhI}fQ>s* z2NAP<`_zlH-Cqm0$2P|1>nyXbODCk(i4hujg)yuqO1*rAi&;$cqSEPZ^>g=)*|>rt z*UW2Y0W_(oA#ya=6?hf1>35uDEj_eI?-sH$jY^e^jtX1;oCzOv13f$@-vhV10?gLI zqkn+g;`G`h4-U8+t&~)*B==vuXU|!FjQ2J@l^$w3jQJj95;;HZ3b1I8(`XEgs(-_r z$xPe81IX0@8dP^80I%QkGxFRYQD0I>dO(f4@QockZmwm{6npoEkZtad>jZUxe2u@_ zJ?6L%4n1_2w1HQz+zCNqoyB-$%wY9+hxBHho9EQ-6KOQ z_4(Qe!qVDQuIjM*x@=Ga-uxRUUnrIX|LeARMtbG<+$mJ_No^W|AB9JS z9IqSKEJ{tRE%T5}y!1uyK&(49fLC#DT`sEzq{1U^R7ccb0zCM+zHxtLgT>gI<>iuo z@#*6*P5Qk=EgzDI$@tVxEZ1H_&_K4^sB&EOvnKM5mFg#-E+GdO!T_kc?<1#GMLC7v z@dT?pW#djxQRqGW)gnt9#j-E9_$-Ac>a%zYet0zZ*k-b2 zraI39X?Gtu++LV6YTJ49! zv%p3GTZZU5e0-M^ZbbhPMo<@i)++oD%6dVODmZD_p3}}dBV?$WJA4qMS9uvMTH^at zlU*ZII!wfz)BDNZe7xfC<*bL?B)B*U^C(qB4q; zI$a|q{+n;pu0D!IR2p<}lc@FnO={dYl>E+7V?In_q~!LwYg!6K#9Tk z*hN$3d)V=ozBwyd&2ponK*yx&*gbqUAXy?*=WFGu(u9TiC<5e&u!x&G!t`-`HscS) zHPV(@t7brvZ7gwJ5$fw1U6w{F-8-|FrMI7m@o@yt(l{MIA2(zCaa9YpzSpi-iosO1 zI#7XpDBIiaF-}0$TNsqbnY_8KN$yqk=I7{iL_2h=T5154d1?E5UlsEJ1M(sX6gX&N;Ij>l* zuT)azdy>I(+H)mj;@5B7sVDBQzju|&wQlky2aJ|49XNx>$q&8>n8EMU&q(S+!BYG* zVK5#I4WuO#P4Kt-Q8d*2|7eXbyWhtLB6h#*Lz0lcmQcmAz?E4_=t}M5EP1_I#s#2d zuNa&a->lF0pT-+t26i55=T(J8OR#l_Kjy9BG;LfqkhF{?LYt@5wT;kR(2;t+q&SU~ zsBk%0>mrm!&mQ^BMyRQd5RDc&I>_^$B$^ef`ySR<8R3vWuL`ipQo3Vy3PMX2C%^b? zQ$9?7ef41}#d$xW5jO+OJdE~{xxY_p)~Tt~9%bE$agLTG5_9s+YHayl&$ZCw&q!G@ zvtH5Ie1xBg-{6kl>|y2wTQr;K(^-<7H+AGlo9z3;ZyNn{WHS=IO^vFb1bnsqBXA_& z#Dg^YGJ$ZlP6!Hp2l3qaJR)8;YC$B@#2_?M-MzL_)MBH2OPKsN#Och~R{NB)H$uF) z;oBMawaI2+X7fk3MN)&_IT4H^ z+Ev8Zhrw-+X-8*M#WmqmD6Cn`@-v-T#9*7p!}z>Cms0kZv7A(ZUQ%Z6Kf#-NqTzA( zOk~1f+aD;fB&XOqFgCM&>Z!nw2bbG&#xNhF|BcER*O(15MttN1H;wHSt^ zbBNYXeMFh@C^ayqv<4ogT`%$ODBgi&QDfaskKn*(O=gG-uvUnY2t%h)|5~HaS8u5P z{q$ZTbQfCNS{qc_$r&1(J-K21xvxJq=ur}cZlKqNELs$$lzW~{OwV>UI-79%svBD@ zrc>}}ifg5?u@wKyM9u~jyhNxzp|P9?O8G={t_P|vg+6Q7Wv(;ekmDw!E`(5de# z3Al-)HByQHK@c$@q8;Jm^35|yog0a3+b+$Eqf^`xpjhlF1N-b3cM)Lo8mI$fu<9JO*CdK`OU!&IdI#h%!b%aq&XG}U+h_6#UUjMQvg3xPpQ?+$fK^oByTG*BMZNhy2K3D9%HOKNMDQ z9}_Zj|C!RLdm1ix&$99f%4=ep;m8{J9K1YXnX*u`^dK@Fy}%KFr&7){Yw8dU6mAq} zc#lcfgQPG~uK7XcN&I&UyF#(!KGy;>#cu00 z0amcDqw^=syka5?4+d-x@pN3~V~x_pmNHRD;0~4Vcp%{^xW8u<9kkX$qQUT_&hS~t zDQ5-~$TSP?we2E{+#I)?ZKPaibhU)EazqAJZG~+QK#(Rlw+G&x?iFfQ6gEto!C>ex3@g9WBDU-XsMT6^Y^2rn9oGM5wS3JVeZ)n#UU8Xb#`KjCMW?>6Ps8^_i#6Q~g2 zWOE}Gb&Mbbb2*t zff%0^Z(BX}#zdXTZ2e77%lEPJrIfZ97w_^ip%=MuX9SF!rJJoWN$CTVC6+{{JF*C{ zyOMY4pOl#pjB5cafUU3@Xo?{}Dt-%Mp5*H*Hizp`4kmCk05x z-@irLol-y@{5ym!Le{6 z#ozM=^Q+v7A8?v`FE5!DF8FL+#evT=V_NYIo{juHNT~@{a_-;%P8gCRoU2IGUEuj8 ziEwq^xho#(|27-=6Xk~*b6u+Sp@3<1vpRavf>anxtOL_)2~CM)t@+V33XAX%j?{>+ zUx6A$`dY2|vYobqdOB@G{P#}45^(2+edVGmx%m7V3wCx@!2&)vV9(jFnTZs4ivv!r z?2iYzXG!Icrwzti5}_fSV{cvHr@aWGU*DV8C^vfJ@i$x=!Ydw5u8k^vW_tdHd? 
zL&VPwnen;9vv{9drcBJ0Za!%x-{o|pu?ld(mbX8cYj|2MtXMH>-v@t}-rd{(D=|wC zUX^)6Q5(Ccz@5%Dt1}0i4Z^fS8*21bo6xbR$QvfawuY3>Sw>@OB;Mb52QSOV} z=On2nWZ#=5|!r~ufM&<0LFsD%lTHz`}g zz6k^+s4!1F1LM{Pitdu%F6E52{-usTP^Wj|WP53sPZjhPEP6l0Ny{lT?pWK6(_QhI zmyc0t)6CyKH#FjOOEB>aKKqJFfH*dZ{DqqBAYer^Mettkqr@P}ugtEuL{M7hpR7od z<&3RXw3JH;pGfRdO4nyHQe^SruEx2n`9j{uinftl+mCr6y$^afp}Q^6IbGC%kWySG z4P4cXfG)Js@|6CiX>j1LTI~+owXcl>T<8N$?AHMFYxIzKVNMY>WUt1_YW;x$WRj+g zh02@-nE^`OC7Q^Y8jh$xHS(W4r#(6g7AZIrw?S#bD($G-o5NzQl(6LeYT+vApc1Pv zj^}KYG*jXa%?%-FL|+1=OkXF}m%Ram!?u}EdEf)sYu4(4_gN|IKm4OaRT4Eu$>@J2 zF7!#M0p|g~p>CQ)z%5@p)Afu0CH`dV{^#{3YR6R9vj6E)$um{wqxP9!W^EYJ>Um&< ztJssC4RghF<-EGd%6^(YQQMtaD>tX+}*RHx{7 z>Y3lH&X}vH2jN=zxmx{>y$lG073QCsJ`!U1;3dnDQr9vaDx)=_Xv*i=4q276zl0pY z!?O97Xb*f!+3j5Cq&#Bi3h7>}IO!TEi{n>@2XIX$V|a!V@b8wA1{LXlF$-GbM!am2 z|M?}R#G&oe3J5EfjIeo)H;7eh1Ctl^JA;96C7DaHwcZy>TK!$!@a!B+Oxty{`?-}94-uP^zt=t7z_Wmy9=rtJiE zWANkR0%9r&KZ4en7be`V4X@iEOeqr8bXkH{U%(GMCO#ezt;^rctn>w6lU%?Lzvc!o zMc^)Wu*oR)z0T5)vncH9P?g%_X{_yT#t+8ghW3-o5>4&JQ%6R+00B^ng=>zD%ir^k zMvnY#Cucw}p}(WyfDa2$`mZM&D3)kkZ2>{&dg`BTs{2X7LG6cu=P$RYu zlGOw`>uzQ7X?&x4Rl1#z7as?=<^#_qvWEidwv@HAXPapeBKCcYxgnU!;?*>7xVge& zw=GoRr_R6qM<~wsMDc&?jE~8R&p!9(sOv2k-Jk)O|MdyGo!a1;O#j))bA?h!VC-VF zd{j50fudI@qsM(;%z3c(^*4elEQ|~U5QZSaX8+M|vt(3vQ`=}$CI(WBmT3+Z2_aP~ zqS=MHxX#Dp*MWEzN$wlTM7Y_Zldr`D12x6nR!Yg&Q<9NmQPbSo?2g1!P!NA0s3Dqk z3{_2fp~ayoVKfCol&v`3C6d*4#{up#&F9xiJKe))8ulA}dufCxS&SDHIqSVqd%bS* zf4n^XpXQy)N%qJ-p9>*5@P|;o_B60>D@w~87i)c*m%r80D*lWk3p^{a@}OgRV!nS< znOduhVVupQsh$-J0%B!{f=$GrB7? zn)H-0sBbb@GfEa-jN!qi+UUWE+G8-^sNFUBV%jClQ_JW5sG9c$W&&vWfAy9AM{9)T zd#s4#A{i0N<=yNoR(o|pbN>g-#_@vg>7@`lZ`mr{Jl_aYJ7WYH@7H6 zNC;3=;Lc=g0u`zw#%W7WkWc2GvfAUm#S8~s04&6$#Y|lFSr;>BG&(Bt_H@dA99SCX zS5h>*JN&v(RbFeMC~%bKJO%X5NF(bBU3Xw7yn?$ji5z=}n-`QtU^gxxUPqT?1k%$#j^OEB37+ATK#HvIyZ$`*ol|Fw0LSznW0z3RqM0K=Yi6_CP zL}k|4Kk!>&*glNt#d!z0mT^;8)@TG?)6G0n;+fZ55dT7*2F8a4q3v!+&q&8ubuLr! 
z@JBe{7u43sxWW*c?3Ub<`gJW+l*XEj!?{USmi=~vdQ{Sx8Jk^-SXiLxonzRf7DYTqR7MfF<81u= zTA}N4vw|AM&NePYHOiN(O^#q{`uT0NUr|Z>Z@wQJp{$o&Frf@&@4q?vBtRoS+Fg~o zNn{|bJ!1TN&|n~?zax17qSBV;CKNdaI<`Q69I{8ZLkRLKGmc*tUO?@r#Y_jk^@`=zw&T5Ys95 z)5qwmrYEMBSoQXaOdf=#rF@}oo}p@K&?Ef5n}64Hu1JN3aF|5eWPtjr6`ua>Egblu z=kzJ4k~O50qEqk}ubWfSl{P)bCDdpyH26#?zylak+C5yEUf#7mO7K9~?dZGy$T8A- zzB$AVM0-bQb9aUWHkFUqeTU`iepvnugJT8Sv~;R_FuJ-jojhRElMG6PJVBOObto*P zpJ05L^w_4@i^e}C<5~c8^xr$`kiqNT>sM^mi03K`A{xlxshFZ*yiTqnvQgVkWea=T zTCeMmha|d`Yfdgd549arN*7W^036}nQd*mB_&!-1K8U(Mvf4vLxJ6y~821&C?wk^; zpXm^<4m~(dnz3f2lhtH6tP{y0f7G+1lfV`*isC-Tuf4t2_9eSPH*IZz^orD0=F9z| zDz?ud89x&NuveDTSuKtuv*ZY?RMNDJ>-U)PGfm778r_*x=F%P7UbUlv*}mU?7*we; zM_GMtE#JqvK|kBxTOR*{CxySKGW(RnTUZ(0T=JglH`&6Y@>%`7SAZ0sfs^Cde ziN!pH4rF#9pRwbmjfz%!w(hz}hp1nOO)%4LD@<9ZuXNHnaCS^LQi3~POW{w8SihFXYlh?+Hi#!Qa zAVT(k2Sn};;vcCWou;()O)3IXm~JurI}qNVY26eYZa}ZAUvBzpkM3Z%Vk2zzPLu2* zf0=KZ9q5!NRKQ&T?j9D0cc8$zdK{E~py_3sJ}%(IV=JAgq3)Y3E7NI_7aC(0eoHwBGy3Nnxop;g7*KG19*Swufr#GqA{4TtE7(pO-s-s{$n@;K2$n z1wZtr&*&MKRj?VhvEcXY?YR<>nT&I8?V@QctUXGjcBcWBjz`Q?36Kkr5tTqbzyL{O zSX_-(E5X+MshE#5V^GyS=uAi{P6vCMJbvNlV~0168)x!I(vt@jl#Ap7_(eBW7Cx09 ztiqhG%sPua^mIcq!XnTDF;ERDpKaAd`O(mO8y=jR$Id z0J8Q{R=2lWjp?`4{v;%X-;?z<_V=)|!b1_fls+fx8MG;7?r{1R816cd&*?T+uw7Uj zKOwwg)!{CwAoxGpE%}d)6;^ca>t3cKW?U49GDxFyL&Yyj+v--zf@isQY*7O)Xx}j) zEQc)JdRpX>;b@0M*rb?sQDBV?{r!0>t!b+u!@+tyy%7s0MpWz&q{Az2ivfb_+3D-^IzASg`l)oSRic=W2a%zkxI%VX7V z&5GXedxmjlX_jwKU&>2GTGW)pu}OuZV=Lw|NOnJiqExVzZ|6b_h~+T9%R(0m{MG!= zlG{zSvux%-U-V@4SWzz9ag_7=syx=Qs!FPylnCzHz|yuwO`an%f^n~-0K7$9 zx#63V70am&HexHpl$LQpVnV`#DK~57Ih#TA4$c^fjWo{JJ3#(!c(rqct3OO{t$9tQ zrAK&*sM!GWy3cOf@dnxoTpBq--z!0W3qbFK<^ZT4OS4BKY!TAl-)xn>Fo$u#XuCugEmM zFoAih^E5^&`{OJ84xA3kSUvfz)M(6^FW%5t^WjhXgy{-;aq|}KVVG(BAj7crVyO=` zhTdY#WH^yBVxLz?r)H^YZ_Z_V7-lm7WWjh;)Y@`1;9de9K zof&*L$<7%E#&qa!ANif1*%$22$XZr;s|_sJUA9yZ23Ss%?Q$G>IL-_f`LB?}yt-LKxx~Jl$5S z&H1~geEoOXixo5f;_r5!w3{L(8<2m^mS8t0Reu61-r1_wtyw{2yFc|=<##yuL#ZFT zu$9GV+*gnC@ozunePrjUzj+?&wSN0_9dgLu-A%D7qcQA6;Yt3=#BvMc(5w-6q1x(E zLzq*#Y&r?#|M*+2PtZEiFgC<9lYw9Ql9BBtMA6Up+WVvnCja`e5?+ZST}MF>SE5~u zlZNiIn&p+lJl|nhXyJ{R6yD1yt!F7DTEksh{|;jC?d6Wv_2oS(oy<;|tkuI3&PpQK zLFsV_42&o7??(jLeF>9P=`&XpTRyfCA^o}`xke)WKEW#;plp8`L53H0=_i%aL*Sfu1UvSE-F zLKeC#R$^t=0C(N@qgZY+6dZS~*wfIvBFeCO{L7RNSdiA57L61BeJie!KZTVbqA7cn z#W%LcedH{iM$ZL%r|d_~ux{&e92Q^Q5A(gXUbwt`(1mVO+O)Fuv-6m+zg+v>TQ@_H z{bFynS&BM(f8w8aCC6guc~-D-S$4_2rf`s00Ad7G5xThuODs+q`M$vWJ7(Af8|~!c z>ivRGz)#nx2@~O#rhae-C1c{&*2kac4SnN}=B`Nh;4^Uq+?)^UVH}JGNeo)BMbrf2 z&KN~O)oczwd&mc+R%W=}4F0B z5{HCrP+%3!7U;j2u2x3i?AgnBRm{^eCZ2zTJ*?kd94q>{+e^;m;0nmu?4kD{j zM1>qXTwcJ$_d^kx-|gLP{OGq!?~A5YLDXUjoB?a5zvtbui12g(M_$ut(IqWh<~z9= zU$!w^-r!y4>!9HoVIQi^dW6(9UQBv?bj$XBj;wvPRALV#9{Cd5*Zg!ieAIwctAQR|=aO0Y=O6`ch~}$zvI^GGI_ene ze&3vbeex2FIhy@Y?m#&kD0J_^ssQjfblO_p1p3_sp%yY$+}ZArl4^7aC}%0Iu6(U2 z9&;@4m#jsk<*J3eBJm>(!*Pf6pG5`fYyOEr5ID8|s8Q8^u0#Z3Ug>N?$z0RMHb|?T zs#==$C24acg6ZJKDw&Js>b7KjmnU0V?Q#*kotwbnwxP_qc!>pHj>M79dg!!tx&odga3NR{fAJFI@+ku3=h8shISudg7NI-CgJdG22HPowiVCQ1gZs7r!c`^>zVLyK=>~oaJ(+A4SOhIyY&CfeikxV zzh&=_2EESe2m_lU$F)<2864LhrrYJAoXl~exSG+Z!M>ReF(8`^y~NFh-W&*NUy|J8 z2%qk*1NZQT8M`DLir!Vo6o05XF4cFSyrvp&ZtQOx43;bbM9t=*w?$o8qZmfkiL$ju zJNlt?K9T{i`s!rWVu0Q-y48*q4^)wpcb2`uK4AD^V$E~zK8L@i_Tjw=GH8rT1t{s2 zzstR1v6>2RLRJyYaP+&nU)pz4Ot&+aXYQPVqe}pNmY*?ZE?ZKEg<)SRJ8gta{LkAX zWpeFjw%5`MXd*qmCUD#>3IXn;PI@2;*G*2E^-UnF-=FW(w;fmH(^`O2QPZvphtZ&; zwHcVB>|>M=@8yE9w!SR8#6{hHOmZ z$6+-1Y1s6~%AvINtrzb#YuhIBx-z!-8dqp=+b=AD_f;m0`>A>BJ3{)GP$Ey{ z{=4fD!UXn#>OZE@b6!YR5DJzEI_XkOQ%r{-ggP`UIF4nDT;TpJCjyzh9e>n80sFWT 
zSjQ&wa@D{`c=+CmyVKX;yUD*dXOD*+fvnDx^rndyd$px5c%?f*o5R&h7zhX_kjRgL zNYSRRqPy_CyfG=N*_LBw7-+WPS*ZS7lWBrWWieiSbFCCbF+JmKk%5Lib*j&}NA4N8viq22-;`WW5`9(b*@=^(} zz?e$HZi7YQ&x<*k_j4lCb(H$b(Ga}Lre0~JKtWR`k=?_5c)}}KdIjv~^Jx3->L*gf zS@$=iapJR#>I`oKZGGZ=TE8z%#9rKylmA&bli?9CR9_aFQ)o?u+KzDk+3mA}#p{GP z?N-_w9Q_TkDBFJemZ$^cQg^@=gk>-wyY|EaOVYZiRw4z$avpr0Z);)eRof?O$}F&@ z+_Z`SD10-93wq!l!bWz50p&)`Jx+CT+h7I<55m7XX|3!PBu8xXmg}7L&^YZ?jEJ!>VOmFXXQXx9pD5?Qvv&m6pMwAufYzMLii;kW4 zJBk+X*9jlzd7X+e0SRRI$Pmb3^-t12=I`lxOs1#}C~);7JVrFNvNCVKw5-aWL62XY z{v-%TN%4-p!_>8W|gqR7XD!bHhPKl`P?g zc`7R@nF2~f1>*iRQgpWB*LGK`UI|Op(Pg#TWmndw@8$QnpnlTlHDo@Mk=4-Hcw;}= zSDH?Booe@ZwKXhUL+1_WXcPX*j zctgU5>wL>1nk@oi2IDD*X`5jGI7R5Lx_kquK}Z*id>i?)`B_{vjt#r0xU-Jby;nSp zx?8#<;{zE2F0cwLSbl08*)`k_M2eo}2X!Ou(!{@_e|b&jUO$g`po6F~5QCI=5W6Hy^|yW!&m&+u;$;1>rXGzV{inV44~# zA8)EfC(ba6^vq~*s6iuq4G!8C#s&4WwC-;wmZo`Ci01EOiNO7u=gM8pNr=x+XAd>7 z>_bhk?fpRjvOn~@g>P{Z;UjSw14T{S6UGmvhOD}kq(BqLFM9qoJcxF z_+5&a&#u=#yw6-+M6AxqL_czxp0}t@5P}S`?3a^0*oP_xHH$igVGWC=a~;Z$wrHie zZ08BzY6W>tgf~xdP6*TMLc&>htU{lB?@ovcJKFAYCUuwVQ`CH@^s%_n)_V9q6{$-t z7IbcG24}nx`dHWLO&np~L|z9L+b^^ZI4LUakv--J-|Wx0(^^QmR#8p<1>EqpnA0Uo z2>R^4!eWD|>i3zvwm-({S}8%8Nl@;m6yPU#QbP*1+0V!NDW;Jk0t$RuT~`1;-tf@G z%wHEYjMq2hV|opi^wt%k)sCdXJr<+V?z*=Y8r13TmGvyhFNXZK3l z=DVZNlxQBMyrbN)27JR|n6brfh4kqA*Y~sVr&0>OTQX1etrtv@27@#Wv&l;}SeD4z zjZ1c0OVf`&hoLsu6gfmn`fiAzmy!v;6VHF98@j~I-0{D$F4QA2{UgAZ*^@l-Qbxau zRPv+@_gl^xyFZt7%G=L>-sIx+;K2E~y}(D4`*|Nd)ZFm=(}a68Ev!~Uo%IjCun0nc8}sV051k<@~Dca z=$q64=EJii36N2#cuFtl7b~D%{@P!4(ae)KS!~p3 zv!s<6!qp8lu`*D0^Ot4iMt98Ps~m5k0yeib{g$rGyRylh!lV8;NddmjVQ7V~*on!e z>Lxc0m#H-7#2IivEdzG>T;Qbk;T3dl#l3GiM-F|7jJwDC4l;vZNb zpNrUsLJMwb#2aWH=MXx4`ti*ieBN(?uIZFaW-2lMPcHx_)9swA!Mzcr*At{J1^E>= zWV9~iZ$h{$zTHpoIKn63)`lcZ25L3}4yuUDZd(UMmkNi!I=w%XZ+-twP#bmEN;v3m z>r4j|zx*C|pV_**u0N_eD?waf%+KVh29+xXs}r+1UiYH)Kj6Ovt!Jf_M@PnF3^cjx zD08T|jW|(}qA5+nzoxY^N9q*1e>laoS;4hDuSc)}P!7MDqS?fn7SUi>pq7BGE%wG2 zA9PQS2QrFw}EDx$``rpcqI9Hy~# zZpd0}xf0Rs^bHM>#)G!tEGj(1T=~B?~ULzJ9@%Sc;CJYZrukzj035|6e7bV)Jc$wQdxfvTIju#Loju7Tp$5n?tJrUxM zBAHlHO-chD`%OHDxKqwC%RmE~55YB$hZ(FtJYpNNz%8WH;a4`j(kFQQAg2tK+Hh{*mu2sbHIW`-0Xp!8_#UKd_2%)(ChS z=8On-e;>So{uXDS1Y;CncG&@ajA{p!V@wI}dpH{x@_sMF3cca`Rufwxj);$m^W|tP z>x2Fkoh&w9U?`P`tY^44!$CCzB{d+&r@Gb?6eTL!c{OvZD+~AO02nPj6qp8G%MJNL zg0I+rra*+(V(k9z+3*?pG^Tn_Op!<+DC3jsc^vMwnSataPsDxwP)98Un6!SQ)l8oo zkmaQ*Ty)t zlViPmB@G};3;$PU+5)_8#?S|;FBJ_Rp?5YN*IP0nxCYc)4)_xdZdTlDCwDtD6)R=4 zE}AVD+rF{ZW7ahsFUa7R8fr!7|G(0xH>SqT+h4Wkb}ZKn2N9(O%JL%Y9X-mgNzVQP zFv7Fc67$-A2~f8W?|jOJBa>m;t;Mk=9V(3%h59RM`Mxt&m67$9ZLYyyF8~O5P#@8? 
z@=&>sj>ufKYuv8;1SLs5{wO?~7x>d{#!lj<^_#>ayy@@+zkV6vwm;yCF^rCWw`bO| zCImugMq*XipyIq9p_(@}wh`|B_v*4&-#-$1cv81u9quAkRg$G>Dme0~Gmi~B9vM-0 zO}!~oj-*ja)YrAX{!Cffzhb`*8veCR!fY9XO=y(d49deJf==nPS)KNdFe#c+?HM&yTi5(;KVHG?DyjgJ zk$e2H3~Km^xbD$T9ho0|s}u{hjo>@7W!}rmV*`$J95#$Mho;-w@Vlo{Gk;50)1COq z5{|pHop-ET#$WD)S&EAcZf%WitFum#8sY=j4E-B;<4GKY{Q0i~oV1?}yY%XDu^CbH zNBu53%?p@kTs#W|PhR=mcIdSY36%j{5fH{FkTwBi2w;!fFKko6_;5F~^=nq2&Z-0fOk}LR5dt%#4-4WK!)8s-(cEA}d=Xu=VkLSZmFLb$$ z`;YnmmznbaP!gL4im7$oz|=|>p0^;SgdKX}vf*i4WKh63euw(OhQ4pvC5 zm*TYd?0|73O-@=L#+a=7>rTosW|16VVjSOTR+jAvYrA!d5 zU&Zq!_~!b~OqoRR25nr%KuTH4W|LL%n<9%ty zAh#fe4~63pK|wqtk>>?UET&Xs;EZq}>e1|Cq4M@;5xo~q^&j8!i!~UF|3v!C=?Rh2 zzCW(vHB&SCDm_SIQc|CzfI>T4PS2+sE)V;BZ&-xas6?jlRbh#~-d6v-eeEb8snm0K z?3eLHzyI{FLa>~~Uvt0TZYDWY*K_-1t+bCL5HaDW^Y)DbhxzQWBe&}_g;q9%OHXSI zXDWfGwG!T*?G9!^wXetao1@kSBdqfvSXmaE1R)$u$ zim!%6K$)sSfd)t0(q6%h#=4b=)3-(Zs z5By6WscexPoKxOEz1|o%P>A%RyUGBhBb$8?{1@7cU*&`Pv~G$vo-+(?jc~5O z^y4q!#&C)mR}hKX^L0H)p~in1-#i*!J_L{=kK1dO+{zb=riEb^L(vxT=Irz25e=T_ z`Ph#6#+J|rcSQ*kD$@}WWkzMdm5N%)Y4Cn75hCXA1Rj)Z3>MK1jE3vBzoCIYrIpj& z3OyUu_tv?X4Fz0sP&jbPZ9p!CQ<8uB8v{VTSM6={`JU6wIkX;enZMFjtSdEu+6n|% zgmHhrKaH(FEod0SNO5sQPZylB9+dXywIPiG*T&fX-FxTTxbbc6oVXrXUKszcg4h7fX=K&7(#2v&0!716aH{yoKY;%n_ z&DF^XhIjb@pc8y~uTK3*|KEGl$KPr#bnhDv24|>T*Z}h$3G#e*`tE!LlnINM|bzzqg0M*>Vmb`|DN~! zM%YBT6daOavE5m`|7ytF|CQT0JcrftJBvOui5I^Q@(5v&>dg-XTpP}3h9}S+quEi$ z2&W2{Q@!S0&=-df(^&B0jG8_Y_)?2`d}VWLs&oix3M%HA^hX?T#C)jSrOD&{9D97E zpb(4r&Sn?XXCsA6i9SL<*TD8C=vb9-wxU@yn-45uQlj6D4}4h}H50#dUgAdL#3hqe zr_I=_{Lw>Y^m7kvR_K2%`qleN|Hm8VBLW2v@kydfo7Ula{&3@{Ic#?1#0s}Hs_*jG zzhM0i(m{%_t|kBz$nr(#Hkq@PIRy?Y_ozq4as4vcQ8wG&9^sJg zY&*p}L$INp*NS!30ews3!dlQ8bunqsjMdoV4krVS4IjuOFo2}A*Vb4&_#I0AkAcVc zoEs+=MpW!jK89n{yL;l6x^8w;cyPpp(C$6j{IepGA3mJ$g*B5hQ2;2N^b^9d-gh;8 zKJ)WEn@ChsYgppA=CV1^k7hd)t(F&FVBh z2SoU+ix9GZ`tzqRz+2uJm4h?Fd5F86Qs5ejmH789!-D2(nmNk}#NZ8X$HvA@v6e-p zM86@wnJS3i#{V;kg_l2Ybna=sBt4gviE31e^oXv}3zdOsrj7u4f8U8(-1WRa5$5?M zrGk0K%&lDC3VGv$JynLdP0BNCi(z*omsb+D3Ewr}e$_`mL`A z!)-x>Lnus0C9_`aztF_UeN9?SmRd>Q-!-HxZ$uDbfx=c1$>PRdWPoPEi6q@Xh6 zg8&V>I=kHs{ckk&uvr4%Cp(cqeEJAi_(Ai`q&4g!5~E=GBQ3=Y{@qAMC~7O{LVP!@ z=q$*1o;>V}7EM`>ZrG^!!O%zl{N2QC;rL+xCx8X*jp!$xCtWTHkbL4UX^gm{p-ByI z7tN0E8AR-Wb|RrzzwPJodZ#oWRoz6Z*^zJWU|s=DVAXi0ecU!apD9zH%0p{%v+29 zX;M!gbQZc0pnQy@UH=!g8o&(803jKmjK5sEmCnH_S9(;q^NN@n+?l3+-!Q$x2;C^| zE>y~>P!0`o@0AA!;{F%)zCbPFK0Xh@BMcr+>g7&^nZLPnJ&#zzaUTh2wu!!a9^qXH zCd=sr&^MHr$CM)tL?sMkAyyiq8I=g*X=>SuX@gSjlo~S7UuzQX|C*RZJ78$;(DL13 z>`ua`ym^3kzfGul`$b$c--fE@N*Gw`^t5_NN0V7z76lF+q12wOf7O3wOU5lJoR^Ij zpsE|VlCY>?XJ!lJ>wqS@z7upX9_JQ<9)nGs{+b%DvxH^P}) zTvHD_SG5uyRHo0#w<;2Ck0R&n$Aj)u97esT;`bJF+7&{bY>j2sn&gyv?UA_^sE@=7ZeJZH1fhiZz~%fm6*Y#8|K$9& zHQ{Vq|4jm%W?x@A0meo0gpyjdhdJl%Cpj{B60=oPKy(7+_Kg#tFKvhi=Ms;Vvj%Yka-dWhD z)AnsE2VW+Mw%K01YeTMWQWktpvl zh%vPu7Iu2qCIz{(OU%r?ua+*gaL{Jd(JE6c0BJ6g047_is{RTR3k|IDo zfkaP5dCc}hb!to9be~OOaK_+9$S)^=k0GRAuavXc1m;4-BAK;E`H>Uw;FEK@&Aiu4tC*~P@l${I=v7PhL5VY1NObbQ3CME z;B(ub*Dr#xcFP9SU~M)2i93Iy;3GSELnYzn+5dXTO%M~K2gLD(9HxigAyxma97wn= z*)<9LXJwu7LiL^O`O-Nbz9nQMcKLe%G3Tdv=}y-Cp>knqMr)jFEF*V3#Ooi_zugLm z(b{rNq^2~&j)c-hDcse4l~dpYjwdrl4zY9Cp-%+AD@;4Um>%lXwr%Qex_J?U;y=Z| zVjQ`MCb)Z8-K0AVyML6D&6T5^R+$uj@C||C8s4X01$Du$Grxr|mM4r4TYd!Md3-z8 zlOUHAP4<0icutPY6?7^0LmW#^zn>h=<+6D5Jr+=f;W*iB-8D7N61H>`16Wzf?;QcUpLlV4it4wMR0mw7#Oz{|MK1f#bamWS~wa z#r2u~s{qmlgv*lc@s@Ws9QB7c zz8`=3B%mCD&L?l-aO7^oKmpbWx!AmXlNV!Og8*=a2!4Uf;fs8Ly;~Uw;Fr6PYLbKM z!(X>c5bb=ZhV6VN_jOBu?=fUy2tVGM*>g%%7diLt2w#>Vo>QXEuzRDaf>4-mFe1gr z8_&xdv@v5EYqgNBF;)=4;t!=;-RUUtBMq!t_m8}3Inv&B!AFQl3Ixn-M~VKR2=!xX 
z_X2db2!#j{_&U{>0TZ{9e`RKU;Av?qvwQeK(BB2gzl1WiL^(}GA50pG2AU#~^2pzl z{RujC%vo^?(90A!@N*2>MIpln5M0sMNEF8=vL0GyaT#Rt%mOz$1B7obR3}_y@qwE} zkY%h%To}ngvxsf0SY}`Y#Pq&*J;0VD&o-x$fwP9i@4<7r9wI|qzV&Q3j720tb!TXL zIl<|$leyMfm8!bZntQzmCwwZ|HcIPMRtCa$>Q7LC1yY@r$K+l2or2WYYVhf}|JfXQ z3E>d`KbF>hqx;jtAJ>0=j(bi8;>HyJe1LB?O`i#GTdm&!af?X*vcG+OP$^~f7yE~? zHiBfto4I#Cy1%&_;sJGDAg#2=0(FfYZ87#E@i?C(=Z^`+JYqfaT57>VdqOJviN2{c zie|I&?AwJnvf&AD^fgQ-Sya^2>m9!keUS_0{hPUptaIZ8H-U89g!$IE1pO78=#i7z3T@u!aYT&0aA^ya_3jz zog6I0`E*Mhu{^82%%#1!*RZSy|Mc5y7tS>4y;z4BU$+Z%j>puiVe778d-C5w_ot|( zAVU`)k_HPS7p=rv3O=ERtGJ^;GmO3|(yQK0orA7Q>w_)jSn-6MOyV;OX?wXnvajkt zCMWCtGqrsr%OO?5E^MbpUdEWWRD*23L-vIaRHi3yuNP&WLSKEz+<%4ou{}6C0MyI6Y9zCs>7F=b z_WVDpOSpss>7Rrno|U=!1TIA07r%(Ux?&Wiaw4h}GXlhl2HFC?SY$2*VTiWXI(-Ev zn4Z9p>t8hjH%3JBa)Wm#>(RGJGKT|{r7b`xy{vakPyLIC4$t(+ixuh$Xq^176N9L>noepbjZqEVUm0NrX%E#-s?9~V}mq&9i<~kC`^^t|MB|JM&=&H!7 z=sl1cDUwXI2Ic(m^?~1yGjWPJ%BmyKpO^RPcaGdnA2c1i`;wrq;6&^tC_Vu6yhkv>%p+ys}6ec^wZG z1x#Y#Flm@8$7#fs(=cIVU62Nt$VyE>->GtdE_nXD9VCf=#rF+*^~T(+Fu%N<3u|U4 zcS>jj()@Ly_4X=wW5h@x@sXd6DWSu5&jr1OhT4Mlm3_2fl z?>R@9sx#19#1SWa`{r~M`@~I%@3qt2GG9){x|V6+p^u@R?ixjT&*M>7V97NhkRzvj ze>CL#jwA|LC*igcY#@R9o-^pUTdpypnF{Cgl0T$T7j4|G0`Edr(2_0Qo85wTRC85A zd`u}##6oZVG4ImC#t_hevTz;sPGy(i>o`7^k0RhnDbtm|o4x~pLi(-ok3Tmqp=tB1h@mbEXRD7#0M{23*W`#t^Ojrj>E?};S z(=tjJ9uxXR-m9r|mPt(eIp46l-l(S!$g`+mUghd(lx zTW^*0fn!dxj*&Y%lTRV9h%GcVd`y6A@2>x%46G5z>- zHhW+0WJ)9S?!pNd5c}FhW-i^z+lUvi>sabR{&@>o(6R_DK`_ zAk^#Ep9m$+!wtb7k_Q?`1cE76h2L^`gTSTJK(8RHYC4BZa*DyPKj)3Ke5IbPEA>!%$r|@uA2aC`tyw6BQA_Q)zIdZIzfdF!|6h1bIQqBYu*P4kE zOu0|rM2c4&EdJt#B^Mb0-KApH+-@juvLMvtGCWk+J~vrYhj9VG1?;MO@ac-cT%yo@ zTGJYDhE*vE)}MKDd!jAvxM-UO#3Qwz=x9~mYCOXCDY;*I9=90DJoso*?^lg-Tg{Dt*ZDq6}2<%02u{>?q}qga1W;1RuX&av!>@ zzNu-w(uObhKHHq`pi8n;xZuVnzWEn{)vfbz={1^K$Ci_m}7Hjnsz5Y#Vb^5A0OatzTIIl>AA0mZi&Dp=}-p zI{q~3ogUsn*30*or~^qR@fxVHv$_@JenlEXn|EzjCM%4yw}EY9PZkTx%ffuQO{!M- zE-pa^F`l+Z`(i)4l0b|=rbgc4VB}l*)PWnW%xKEoHyA{=A`CxV&JFz0={2t z@eWY)*m}s5(hKaNn62|?&*KcVWxlpeNIeAhH{eO}n1$0sP-E&l63b8s#&`_{#BQQF z5a+Y$^L$YJS4<}{it;+xy4|nS`{NlqD*qP*j_)jd*1K= z>M~gPjMCp}etKPccA={ci{89O;Km#vDLt-pmD|}bj-$8P!qq607!|FQ(MyUBuL>*A zS@4`8qQ!W^EjoA$s$ zDJ8QH;sl@QX2-*9n68otUXw1o3#m!_{!2rAe+1^Hhtd>TLa56RD>QWDO4}mJN=azj zGB~e0pTYvn^ofvT7wDpCSlAm1d>5Ug%1E}lB@=rpAugSrT7RH|)`&y7;0=@@M$U33 zsePs3ltHIsk2I3VTVMgk7#AcdDpZ4OqhC$lF|ADZS<7kXprGq4# z1f-s?G(^?FZgf7JI@6gTkpmcziVTVFEft&S*$M6so*A_0Wcf}pyBv%JSmv3rSr%V! 
z!WumTOc4FGeIpq-#~#HMvQStZwmYv`F9Mas`;Pn94=XliD_q`7;u^lLVkNaK=~=|kb>$sh?5L%SH0iPX1qqW)zf+_&Mdy$fx9j9 z^N_e%yUViU90rbkkP5P?!Ka+`B68tEciLtp|M?4cJ{&lOIIlYI^Y>%Q?`mP3Q(xn+ zft=~W^vR%>DK=B9z>fH9KNH_O!(A^<+vyqK0W^aT>X?v;3j4wMmXPE>?tQ!V_kWs< z4woJ94+o<^2K$H$hEfpl3hfNxy@WLli2Yi`{n__*T|@b+gQ>=90l7fkrlTn3FI!4* zBhwgaM683qu|sUYOhg5{L>HQgJ|CDz^T=th2g5BN5z68ZAgZ{BixA$>vt*8jnEZRh z@V>E5Y_$&@e}NMw+snNlb|t@0+;YS7*+SGc0fPH9O#+?Z6U4~(_2{i=q@QJh=<0#S z>GTQ`9Ald7VdI6grMuFwFVULMW`@J>Mv4q&9PVrBzA1lr9pys$TZ;(&8^tBakX^|h zdGkAqYZF%BPCUyV?Q4%+0vVx+E=rHzlr!1Yiz1vaAp%Az>s}+rt|6<7Nu(c6@pOI_ z4lU`5kjtmf>h{=>M>9&dO+In|vWE4_*N$ntmu?INE^5|2Nv^$2LN2lK}j|D6a~U7O>08^8CGh&uqO^ zcFthpJn@@(QWhz_R#x1vxc{yhw3m_$lXMJ50>FP3&8hGB{YHiiV!unDi9jR;P!7aH z^zJDwh;6754`-3b;+zCtu^$TrEWb`Y#|rKkV%-d7Xf*P%TDN`0g*vlA0F>BE6Kl~9 zfr;sUf**SDW6k?=NzE|N__EU43fnu6gRyy{fzkD7KN>p-aDpwkf3AB#R~^lIxE75| z8jm0B{=0k(-?&UfNM*CTGLX;6TGw>$Vq~Ns^?fXA(#R5eebi6f2u74$l*c|km|`vH zSO50>EnYC(jsv=;GU%(1Ng;R}==IG@c2&pl;BssK={2#qWBB&eVIYDBMC(bV^)ZvSNC5GzqlK09dK$B73pG))@v$ zkam|LS*GCI>-e0vV-l`yG##5AibSK(w5Rrr_IGI-o`mSd#D}e_A-74iih5fgchZO> zLqFi_?DCJsRB~DH-lJvk#zwX<9C7kD%a}|$DSHlB$18rahiKT0cf&@f1cZFms9Ua5 zu8euXr|mH$8VTH^A@)C`*uQGsMAK-CF1xvaxiXAubL4qbQvJ%K?B}p3=D)mrr~N2h zQaY?=K9MqFwPauKNBgUPtVYnaRJ@THop{1SX+`IF!mlpd22R_8jpUQ|cGee9%) z+THa&{_^b8q}!yTM92Nt%jKI@xx>g+;w;2VV@*Q2!p=-4_hf&URFX8(_lfCNQA0NP zy{%^LRtF8y&ITxc6ozr^cRbyA{d7{Zm*7ul@&ka$*jSZQx(8O52K4%Cq+xIaJLQVF zxrT5xCfT(^!+j@Fz@`=D<2u}dF2E3JtmAm+1(`Dlt?{bueKurI9-ECCp38TGI*cnb zx_{qF@qtN4B9jiOz0U8iMrY4}XFfyb(m;tQet(;OyVAbi@JGs5jW@^&B1kGP-al2JHmD`pRZnu+w)vUsSM#65fy7i&tu-d!%-AAQzo9tet&Jv_3O(| z^t>hDx0I+>89|T`z|9d1b^(IZZGd@&y6f{gmM4HKILrwBMsdX1b1x zAVG9#()GMo#35-1;Xfna!H}e)mR%(eSWhb*3YwhV!#cds=t1uECshcmToRJHE$a14 ze^|WDwsj%s6G=KJufX7J&(@LAh~L3B zeScX};GNf@PFFU5`du?3e29EgG4Sud5cc!=joiOS$zQ#rc(v`+?Q^a8xxLbZeF3Tw z0<-L_Y^>874Ec@-?smNp`WdI$+|hdLb86t^Q=q@CJrM$b89GH#fQbPu-(z{X@%!m% zO>}_Zsg!EnSsl(g=L2hIn^MssUd^b?!O;m?Vb3Qll1X@E)DH-bK1!R4|tA zB&_}pM%_$+-Shtp+OFzFnoiIbhFSE))<}W_C zNiT;g4w4d*{Pt$;iH~o6XnViX$}g)TKod6Vr->=t&#&K!q9AaMY?knMhL!d0U~W8H zAMR?ZuKmAiH>JUNPqAlzI!W%1IHt z{rJ_P@0Bd57~kE^s7&-C?n*>=qulY-c{ArBZ?Q9DbjYNY@$z%F9d&)chSpWK-;mbN zHdoN#68R+7vj|tBx=hFellb=V8|t^oqiXp4Y(yz|lka>O35p zdOlVYAG3{A#`l-*oEaUcs8yO*YH@gO%ny5hCG#R|ZVbtX2Vga+OI^*#X}rJXkf?Wh zKH}5v+O38|Z*p6|_x=9A7zRVJ&+0bp*;(GyJ{^$$7t3e`0=a@vz(HKke`oBn;n}1~ z+pz(BSLM#BqDFNc%y<%&Gm-hBRZk#`Rn_FxpJaR9I_xxi0$JX=s6`WFJv1&rmwe2q z@RR+oia0u1UKeBdqBSA5x1^6uBFS~uu~yM>m;E^$3&~FkGG+pjJMJ@*Kf3L}DsnT8MY8^7ZV_m^80i^Mlrsi`rXOxGh z)Ooo=)|+qZsFd7t>rCbzZ$Z(6k2@eg&*GD3w~KmJ>qNLB6YIdP%lKm^ov>XzQQx8s z;JcrSeVEE;+t1FT%c=%_tDSX%obQ{x(o7s?4Bi*^f0#P;;F3O=m_6A<@5gwlDl7Vg zwbzly{ch%ba2xs3w{ayH%2uiG+0*|D;W;YuPm&kMJ*hKK@E%VPNsud}r zWl<*1mqfpXf8ZlAT8nXp-;NI>Hv_&fT=z>ctH8a;pW9v3@Y{aGv`%WIJfBu zEcV*)ZrnwIARmf!nUbyw1Lqu&@HmxSlBkfVmL%7#0q3NMv%FcCh|too7o<9*{vth& zE(t_+hc{i_;i3dpj-bCU z2a&|S;vO*hnJDqQ49p@&{AZp#e{JEt#W#Z=}9>ALUAl{UHY}7R4{Qg3{TJxsA@|gel2pK-zb^!MTeu5<6VnkaG zh5D3JVy{?Ee;}g5!CI?bXb7Pw_c2YYN}Is6D3$h6b#tc&g4WNRgI|?zBbxVy8o&Nk zxj>(-njrm?F$$Qxx+;FwY3&%f+6%S|glHOHWcR5y?7Z3@L`&)dfXCufAjd6Xci*^D z_xxW5oN#>IF5wqXKNCi~bASDa*MN+tn|bXigDe*mxR0WP1fKVTOUY3FD%)G zsOKZcN`qQg2PtZVFx*Rco3YUsJ_w>`4O|bVbAM%HLz=on&VCYri zxr!Hk#$j4fe@{Heyn#QCg$G28SD>32|FT$LdWvcKwUREEE_K}XJL?qZB(J6D=o^nw zt;#+bM#?>Q$9B8th*A9c5}vZ9_1+Q{jp$w*YMF%@3@X-Xn^|1-=pHsQy> z86qA!D()hFR}DuPjBVE?d2V_*zGs=&Z}=FJFSg+s@P&1~hCH`vV<(Yb>bOLp$gG$E|Dt1yHVz~>yF>h0sVy`c$Ej}b7W&cdW zA){6;qiARD-25_BGC|H2KPFBVM}=^K1!vJQXOKqeA0eEJ?xmd85WEot@KUcg&UAoW z{S6I-GWhx9P4`;sEHNc)nE%7$Tm7r9p2(%Xz{vBa5t7E)w>>X3WT9Dope);x+*z9) 
z9nfR)qz4=j&6g8T0SyX9PQyLlq~uXk(;Bp+s|k zzA-t`atm|BK9%J|in;QK6kO|4cS7?dW`etZlJF@JNV2%dfe<-O_`qNK$y}{pW?Z11 z(d6-_%5}LU`^7+SH_g`Cg+wXFp{*Pbs8bU#O}6l8oeZnfOwNH8&Y`4}uF`1I>3R}8IU+>o0sO1ZIDHEAVtm)>OsN>dM zyVKJyT10i&Jh$zMF%0&lTAU!CFOyk_FQw;?D~k`lEb4DO?e54o(Y7@?HTMqVC~*oP z1mafDf#6Mm|Ki;<8LJ#799$%Z9S$?_KEbxpEu1@{;$SIh{Ks5!tnpRBxy=jD5+(o+{TeLEHQp9PD!g z#NA8y)TKm?h*x*2O&9P`!Bc(reJbmNkCug&Ifj$?Io{_k%V&gXmA;7WZ`QHNaFbpp zdPYL5L{Sy_I?>^{n~(MkcJ)eJM|=hSTNB(Idp%9m$MHE@!5V*L>*LhK_TkD3VmV4U zKQhUqswQ!V)8mhJT$yyU$+5zk{`h6kSbP>x6QP24N3`2C#r_tJtXPwW6lFRpsVH6- zt!Gtmxhy}v@&XAzEouT#i8&L6KX)sASyLLIH1N6Cv-b(%B6$MPbF1lePQd0fE<5Yx zp!4-z)}uy5M`%q`jT2C8f6X<8NF8Lkaj}I9!T;A0@Mk`PT)O@j+#vAa_LB{Q0lG$e zfp{)PUXkO9bFV?zhe|7|R!FvD20K`y?U|4YIFyWQ9XKDnKZM2!Nj4+@@tQy!XIb?M zmP5*;9%|8BPdB{)6siE`rVy@TN3Q`m$|pHWD+pVB<{ZQPzr|Z{lG+*mq?gqO&CJ5K z1G#!0*H}0~q`#g10wHyU#}#ByBkk~gYujU^BZ^ zL1N?C-;I?CdY*t8QC9$pUJo{2lM9p@X^JemBF#!qJ7>M%-2l7KPa$+V9(EB(94#xq zphjDA2@;NZiyWPqrrEvdzu0epGgA}z_>cbh8k2s*-ToW{e$^v8+qJ_CYYe$791le} zqoiB%AJ9`dC{X&`1EJC^8z}2%8pRc}-zT~xt@)z%G)eMBsi|zHa9ficEqEu+*Pox^ zbYvE}qvFEjUH2@-^+cVr?_^zv04mc6qa+#Snc6Yum!RRc>q9)Ebq7*PtFbI`anD0k zJqPbI4RDD6x1uYaWd_cdr-R@BpEL=)mMcVr#tvQoukeKe=J4MPSXw9Yd3A}h=QJwf zK1vnGLUuHmS^Q15$51aOzdg9?=4p78V-ge@$%tD-_D6BrDJ9xrl$YxGGveDknsbW1 z3K^j|-q0=|7ff6EEbN{Ej%XpV+p>=_?jIxyxVmvQZ-Smm1xjyQhdNqIW;y$7JDB`O z>u7n9msrc{6jC?->B`HbGFPi>ZOANaiCw^rV!{tq+vb8?ByU)$Ll^M6r28fT(gJhk z%a5&bdL4mV_A;_0BgynItv7ZmhIW=ZpObF0dd zUpI%d&WF8T-!*aKJGFOs4#pZ(lozsO!2*I;0VylA%T^o8**jgc6i*0;ee|JFg%ZGJ zUkU?k8!j4c$n;}(dUx142tYFLMSLQd2K9*s7@X%mTh>x}kib~PjvFEnug*kpB_Y^L zs3g?O50@eSl(#L6POQH99FV(n`5W|c_&sL0eP2KRuPYkJ?f=X88OJr@G88JNqd|UPI79N?)BG5;0Ix<-55$Ft4xo!ydlq)7?++AQWo5O zk}!r^@K*6>-=b-&D6EG2TwxCcaMbK;c~BAqLyek<$Pe&`L#b|r2kVAQhd`KgwxnS1bNsDo%>rUB&uL{=E znbqK*5G>RpP&KR7zt>kd9hdmFTSs&_TZd3HT)kjlUJXc5RFE~F)ko1uU0(f}>i63P z&eHmOSc}asj&W=rHw37J%Et4b#tt69rH@22OxmvgdmIs+zp8*ei3)0!K?cqeXFC6{ z+rPf=$xM6K5aLOLjVrGjQw1Ns+C0@UulK3X7^rQ*T2LC5S<^L7}F*&Ib zn7Jd9TxfLF^9tis@${DhU0zSAB}*_x3>)Z}C1)s)Dy0#=7b&*c#ouknkx$p-HA`o( z7teJ~kMJQJa6inX@+r=~Kns0s-9;niKkTIl)ZL^1K$OL!5&5ksL4vsxW)%$UwozU( zh*A|U35cLwkAdX5Pj#wN@9pEA!T?KGES;i*5%muku|pg zhs9_!l+1mUFZ%5mc<{r6D_=jYK}wKuj(QAj0nk+f`hmE3=;?m_78s<30Nn^>!2VUR z=E-{vEX-KP2*;&C(o`~zEuSZT7?KxO$)5~lRq}0K$`iLRcH`094fCW%5MPFt-b#h# zyct+J6CyC1_AU#wTwG@{j@|qwid*GFj7}rZ(1mS8XwyJcrL5m&ODbJ|k2HE}+VGV- ztG1cUGL-$(x2pVTa>=~gCfw&I@etVi>?xhnXLgSN*vUuZxQyhO89_;tPr3>dmA7+7L* zbk6F1hP={EqJ1ThS?CzZ8^7yGu4?)UK!nOk$s4X$xb;V)={Jd@rii1mfN}TtQ%-Pg z?H|u(tYDwfMSh&dI9wsPqx!w)a^REN%%p=zrl7}67?l^?&_ra|Xia~sXUR*1VV(F7 zb#(wt5FP(~Rd?M0FrGTU`UBQwrR9(p43+!3&SMenCo($?QVQ2&I~#k$4&(l?ZYOJj z{WN%zL5T5(dpKBfVi+*cg(*jOqGpUB+4a&W}N;Lkqa1y4n7{!o4^WUY?`kXdYxScSdyq*SxNAE z{8RBB)pcVry><05(SeeNOy#=i7_3E@)nwo#!+x)^d7e})i+77&u?jiI&hSq;MCuyZRi71nhrl} zPl7!QiFB7x!(W4fzwCl)eMz|}NmF_e_#s~H+h!rpy~#;zKUJvf#4o1`;oe|lRz`=K z=@EM@EP@<(PC3>pt1LYA*q4T}Qvo5Fmg%;C|2i;q<~JZyH`0ftE}swU`aVS6b`Ev) zbWGaKbiTdo4AkQ1u>N6y@sGNaXl*0m2m*+pw#5Ve0?##2i%r_6z8tC!QBdgnp4w^? 
zr`69bWlitPi?+Mrc%m+nd81yPO;4J+<-Y6K2H(vrU1@ShBX0_ri49NpA>NJk^$4su({ zks`ufFj{sv;%RsxCYk)aTfW+N@<}ARW)ch|HR3SnKOp%W^m*cGo@;Z8Z=LcwT$mYi z#t1!K?DCE|5M^G0CrcI9+{X7eboXo*B}(jJm4bZJe@5MY0wKfobU0?Zp=jRVNMW^- z%Wz|m%w?#oz3nB`OD$rTXfJ8-L0Xe2yXU+1l=1`4{V}uS8+~d6`S3tbWgP2&OyberNk(&tV5{(CpF3GbpRA1`i_fnIZ3LyCB8;47%xhQAIH5)qd9k zy}1}?tU2`HXNd*r%YEnV11g`2+APt~8{%h7u$n5LWbd>8m<8}vzd^MSeuVP4nNN!LFoDgQ zixy5N)2BjDWIn$H9H>@(nxYi$;fcVN|{+4hygdm_%9 zYGCZAU9#hwNSwo6>U+LU;5;8!jC^&zOS4z;ke0#|Dc09ojih;Hm|? zcfR+<-nz*FQLFBaJ;hh#EsY8pYEW$!78ujF88RNqexYJgy##TV#{4V7mwWpE{pKAj zbgcEUrAm#|Tn_K93|=JYQ8(K|)6+49AGm>`YTJ_U7ElbuGh#s_5t!a3t7sDs@!4f~ z|HZfSnC-Mpd*{^x^k^y#`X6FGZZ_jMBNUh)6YFtpZij#Ra+FALZ6!N^tVtcDoC#ZY z53ybL)lF%b22}r7s1_G(`h8VAGLe3MlQZZ|d2o*!WIJ3dF=$tJyj}@aN<#77HM zr^zi8_xvw~_2@5elyUveg3*2QG`*BAli)bGa)VE5nU=TdnO0fUxY%&PQj~&wnOk13 zkz0;R!TTGsk~^wwFE_+ox>B+0v#Tgpv-GJ%?x~{)B@+i3Kiwi;uU3J=RF8_?cZ2L{ z6uiUD9CFj+)NEPMdEs4=P=VQcj*!u~=b%yw1vi&hr@A-?0&+G>qvpmH9oJ@@X8lGw zFGuj!dhvA+leS8K9B`B_bbN8V>blh#&B!Wu$+w0J!uP^Q=^UQ*es93+yf22^&h{L% zXA{-BqSlsb-&mP+@_^{0!1a=WmC#H3wImwksnS;LS5u$v8v0Z_hdIPue$l`RYIdDf z(A26qCxDKQ@AK4W(mV*VhQ9WaxYCF)UQ_MoVq$~$5oMU84i4!BfSe7lYzl%13P5P$4gdUpWb@k(dZ zy7erq?410{w^lyPUSkQM!zq1-SAN=U$@+E!EmXB@@1kNX5q9*!#L*d^*Rqh5{P^gJ z4PrsjO&O-XCm^=Cs0IW^7tPn9g)V|n+elSwDxc4@4XS zx;V>C=3VT9v9iD0!AbDYM^qwZUv3}We^ti_vK@2HMd-V;KLpudC$U&{3&lvwMmZ?w zN$p;FoFuFDmB{+f$K98gl58vj0rVPr%2}pXSRS@a5EY3|Ca(9{*2P83ou(Exq#8Cj zq{+hOjl82(>4_Qba^Zf_?pUR|^-SG}0rVfU)oHep*f9lE(7eqA!*7wgXt(XKJv%Eq z!HEA9`$;=eI;_$X@~vv%_}jRA=-u%baU1-1fCmVPlUXI1|1q$99B;6)d~-or64s>* z+t#Y#MP;Z+!ai(b)QtD`dVs?bo*I&R%2H5Q{e+c#nKnEf=$*3()3T*~@rz+(Gwe(| zI_@0QPsR)oC%|hFKVB{09`TjnMUO(7e1^v#T z8=L>!s4i`EHd1wtGB8)3?{0rE@sb(b%(M6S;Md2b!SO(jrU(s=Cep~rq_mk&jxkEY zqir}20gVgy6BvtIl<=F6IP14?P?FqIeZst7iQGmso4rWu{W{tCy;PyJ*Q(cY)a7Hv zE2Ido(yK=(Dh;cHE%p=;9~ns>;c0-qUQ}sAF@;7bCbas-86BV|N@W*{r5pn22<8A+ zy$F(%pHmw!bgRbM4QSs?juaxhUDMp&n!Fo(ST$K$X$Nn#{gD3;V0qoqX#0vf(Dy%C ziG*UIC&um&u@Ei-HF31GG{PKJW(PGZiu_|*m2NVDs2KP5rWF`A73*^k)&>R*jwZGb zq88>RzmW1HJQ~LOZlOGoWxhL^RC8$2JjHm-^Y8DV@&ogVP5trcPA2_r7@Zkkedsix zKLM2=iPH{CHIvT|SN9TTc99Y%+n#eZ4P{@^Ui=zLOfMHklMf_%Oq{Eq@?d&SxWBSt1U?;SjT0_h9sEbH#X~60%PMa;(xeA&2EiTNv(wawHsa-usToqi zkZhAKvix}vG2r8QUzV^r5E(2QAFH;OL{=&iL1D1RV5M(8^bc!=4w9`r5h z)3B@A$w=8+pfyHM#+qGvFWXMm5xc)-kPxzS(5#&P1(`aZmhbiLn)hOJqHBNo(BMRY z2d$Li(L_S_%{PpiZliIL&TpgQW@uPHWYEH?TW`!aEN$;D_!^0}*HLou>MnBG)rIbi zK*B=zP-URxCQ3}x+qV{N#IbvelCea?KHf>OAoan?@z6;HEb2r#YS2Qy$m>uixl4Df zvn=YUnXjx;E9w#X1TMmzrpvg)#Ok3C^E0T0yufyb<}GFZWtHE*YmF+Om9{fDke9Y) zi z2R70qMxzmhB(yU>E}VQE)+L3*#gN6Y=v1>03M26;!?bO4pZn8Xy?Z_9MtDB>&3&c2 zUbQ8HE4X1#kawb`H9mmBG`B|tb*nNn@ZTu&v|ZHSzTVMvbxGfs~4If z-Picy_sSe4%SNN0o3oX~xl6OrhVUeWi98nSwqRcp8%XbuvK(vxx+>8lJL5T>wos)} zjMGDD%%ipq=+70Jqvj&%r}(3n_4vKD>hY>=l=!0ZF*qzfc%h1D6~okk#qrYW;|I9Z z>lG+7&N&zlf`kmNww+9y8N!F99=Ed_bfFeC*->RkcbH*xOSw$HNOZQJEY5ntaO{ef z2hC}^o}04tKOM=%tfGa{6q8xZI)8qa?Wf*AHAOv#ReksRs$Ys|#!F%@=JB69z#l82 zN4mR;>&^kJC*eZ6x3Y}g{O94=#e!!twG0?90iA91ew;B|{yl}KMs_R~%SuaO;+-V1 zz0;uuDYh=YO66YD+TLzM+VUQA*Noa*&%m)G>BjVv3>*%5u(QNPLaDB_KYoTll^Bc< zpqo7phOnz`59VZ<*3()uZo*d0z&Qz6LM5|#ymyesB}1wvWi&O1gt{IXOY)2UW1d0p zobX$mj&kj7?wp+c(r_{o;Z9xbhY!aVePmbLwR~H{HQv_zbtN*deZC)}V}tsR=kg$+ zC-kO!T?X($O|onxDiPUOTkzS{-BQW7r^%qibK>2sFD(eocb2i`{$Oo`<6L$B^TG0X zkEW^-d_)z^B?ULzUStYghdZcj{&9JGHjAurdu}hG0C)tW@v8{L3=4CyfEn%WhxH+* zJ!n`OV~{J37^$0;o#rCk+=MWbk7)piP1yMyF5mTJ=mZtDjpp~5Eaf*+Q|~&nP*qzx zO!H->k%9$46LfzLUj6$j*p({#-87 zOg9M4qf@70SX0T20(K#^)Ptl;3#0Q4DSYTUCdjG3EX~)~D$QpqnL-wRPbTGpuzRI7 zTH!cEVZ+$i1KCgL^wMv#@Ru1|GQeh03nxk`hNSTn4ZK2s4EjZC)aQapM`+DLTd48i 
zH->BzDKRm>R5;d8r47ycmR*f~JwqhDJC|eX>g2v9hoj2Vpgur3Bc%Ko zED1VdD@*gk{IOW&2YaHOIbmmeW@u8=bbH1iJYwe<5rH`qqYI3am3(>0qcC1*B&NR( z$U45cg)~2sF*rBkGdS|xkTgxQmWw6B7iswaM7^!AWU_v(GOzd|PG+S+MzaxN`2ZEe z{6<~n^P&UkszIbGVPaF$gtL(e41?+Cosb>tgu`%5UGIcIdg7tj#fRoyrvy3mmUqKs zjk~dY^OQ>Fscn>*3?MT>05K3998(P^!sx_)bCk>cB_lakkkkd_fz9 zX9~sQP(!9|we>NEGzF@xZu$t*k&a#bmN;q7aPlt9i?3Kvtt`8v&Y!%j)=So^)&uC9 z-qis?h=k250Gigfi(Od?gFU3g{G!*9g$<6OI8(0~@44dr^&5udS-axZ%KEIQIH8s}J;1ExrL6EgP z_>}es(Wplgw1UE3^!1KFQcGv}(lxwVn0|{e*yF>DQ@50bmchl;-vJl!9R!@Wm6GoQ z^RerVsY^ztM%qiPHAXIvaYTLRcSux-56glOofb}&xXhqR7Q6N45PtKP|G}H@JugWf zc3i|m4x>9083VUg&5J;UPfa zAQ%2v_a)C+t(3CkP>7}9iJ%Dt&f~Z^L@BO03<8gjHy#}C749W2PL|^ycXZ%b*>CSy zwo&KSXrn+#!GE@|lKCay$yKg@D{asK`f}l0mhs?(k8IQETahdtV;!!eSKeo5n4R${ zEgd8-d}G5LXJKX8ysYe=uTj}?LSy|#B|j+i(iUdRETN0XyAJy0SV9qDaH&0%Q*-9*0NU%*tkip%#GAfx==kFCx z*W?zRU?!z5vL+3s{p5-T(>By2k2VS3v(KN~4Gur9ccr`wnH2V;X9dRp2iX2DFF`if zn z9}iXHNLqhXnH|bbEK$kr@H};l`B_D_0?5OeWqFWWqdfTcY^3SD?Lx||X9NlO93KgM zT|r-{T6&;EcgS@L6nrJKFS|bbXdf>v8v{fYk*I5@5#v%??m9BvIXTTJA}Nf?Dp2&> zI=R!T@CHrA$E};Jwoz~kI2_rvCvcs=`3j^xyhhB#PT}CY<0l{McYQx0o5D-eN%cFy zMvEa66)1=+a5Q!}BST@!;KwKZ`2|tvS1`8&z6=D)^QMd0CQf#tW!6aeJFMa91+>&l zNI#JlFHA}!aSw);Spu^-Gb20}>!-j*Gjx{3B?i)k6})5@-iol%tVx4Axd$2GguTjo zUl1n>Up7R}Yd1uJ8a+EV(yU!1ambS=ggebjkAIQ!>sNP)oJV;xk6;6^h@yhxDBYLY z8IXnH3^v)BsCE9z%cT9VR&RlwBFtH@J412Vkwr}$^U`3RqHUaGk|{;hhJ-U*0OZLM z9Vq4~IX0S4Z^|&a*h#^p_%ZqwxeMN|z_67y^Pzki%+D>1TbPc)#r~&S(DO2gs6Iu4 z_dKWp=iCB6&;H`YSa^njn_H}~#oenrV6aMtR`(^nF#>}o5@ z7yfsYuTUlPjy|5m4Jhs>G!{#4*8;aZ8v|A&H4OS*h83Ra8R#ot>~tWD&*Uub^>i5Z z!8-q2D->TVGplh>urPK~$}m2t5suD=gihn}t+MUi@w_xD0KF|mKm!9bNz~}XxeU2? zN%;wzlw$(v$oh!R@tzsm-dS~d_dXkMM!6UfF$NIr*JaF``FAkykq2?IwV@>O>K_WA4_Z>T_@7(x zf!0QXpP2}De*RdFbJz5RI4>znLSE0N>#jt(30HtgbaCe0@$<@+4+S8;YWFnKX~+NG z1^)S0zrX*#4H5ibOP{}HslY=j3xU*2>Wz4@f6ib1eEc6D{uR6YkNCQen%x`=-_b@U zX1&$8MIW4TZ%*jrb^hdAZ#XI@`pIuhD2zR3bKhDhKDX30R>bEv$RGuY7KHpNJ`NjL zX%5v_;J1`@K;kCu+4UDI6jB}m%Qi6^=C1^^20w4G>5N_HFUQGAAgXNt(|?7b*ad;$ z>fhLTa5dBMBe~GFnVr&puNcIx;y{|=#KpQxo0fm&-v(}7VEA!%W%+YI{Tsk#iH#*v z>sefYF2DvJ*vqo=OHOs55z;>$(p*i0{n+dL-JFD?-7ZS&Sx$iLirus5+|{=%*aDW> zN#i?zT)Fy8iQqDY6gNv2R*EvvDr~1ESy#M%{s*_rq1sP%B3EVP$E(DBdir zH$d{T<$}27>RaC60!vesUf~~CuHH!=WavE9hiX7l(I2G1w%s!a`NgHXEAIvT8+$%p zb+ zCFg3+O%m*oWSWfLYFy*Tt0ZNC_(oRj&?e5zvmlG1U+`cx37lyHmOdLou|KX{z0&|l zc98dFaqr{@8P@>1oJfGcD*dj49lEONko+re7=w2+mTK9_e=Y*tEB(g4RVh7J3YOEG z_rrdD>7|g%aUQ4O@h`(&cZ-MwN5WfZDw?O>15Wg+N z|1*&37s$mvDF6_WstNWNRPqa|;r~}CauwVB{A*u;7Z~SSg#V%ye*W$+AO4lv{L3ZF zxDNu53-r^}*VS2+LcUH>K8Ty`MZWS)-Bkr&I!S&oE_ z03spOyV<}paOLPB<)o#exa%8^ zUs*+DH^B09|8dV>5t$bFc}&Tu&Mjp!_|popB)^p__#Xl6eZHXn*V06rg8+M1_gk>A z9tNP=&_J?k6?;G11C}Pxz+bU<$u5AspAL1gvQLTwjwofBTfO4q~E*nn~{Et z^Rl}9EepSu>TlDwYPEmch2M7Jf2Ljd6~~)X0Q`I$F%=%!GN!k2nl>d(yMOEyf%(``~6=&5{F8q>)TxN~m67pL@{@cjf nZ-DU|VEnV5{6A(l79qmPTl2bG^Sai7|IVJ)ReO2LJm9|o+^EV0 literal 0 HcmV?d00001 diff --git a/image_classification/MAE/main_multi_gpu_finetune.py b/image_classification/MAE/main_multi_gpu_finetune.py index 446834c4..c5cb7647 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/image_classification/MAE/main_multi_gpu_finetune.py @@ -223,7 +223,7 @@ def train(dataloader, @paddle.no_grad() def validate(dataloader, model, - optimizer, + criterion, total_batch, debug_steps=100, local_logger=None, @@ -253,8 +253,6 @@ def validate(dataloader, master_acc1_meter = AverageMeter() master_acc5_meter = AverageMeter() - if amp is True: - scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 time_st = time.time() for batch_id, data in enumerate(dataloader): 
@@ -262,7 +260,7 @@ def validate(dataloader, images = data[0] label = data[1] - output = model(image) + output = model(images) loss = criterion(output, label) pred = F.softmax(output) @@ -339,7 +337,6 @@ def main_worker(*args): # STEP 1: Create model model = build_model(config) - model = paddle.DataParallel(model) # STEP 2: Create train and val dataloader if not config.EVAL: @@ -376,62 +373,64 @@ def main_worker(*args): # only use cross entropy for val criterion_val = nn.CrossEntropyLoss() + # STEP 4: Define optimizer and lr_scheduler # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) - if config.TRAIN.LINEAR_SCALED_LR is not None: - linear_scaled_lr = ( - config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - linear_scaled_warmup_start_lr = ( - config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - linear_scaled_end_lr = ( - config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - - if config.TRAIN.ACCUM_ITER > 1: - linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER - linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER - linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER + if not config.EVAL: + if config.TRAIN.LINEAR_SCALED_LR is not None: + linear_scaled_lr = ( + config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + #linear_scaled_warmup_start_lr = ( + # config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + #linear_scaled_end_lr = ( + # config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - config.TRAIN.BASE_LR = linear_scaled_lr - config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr - config.TRAIN.END_LR = linear_scaled_end_lr - - lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale - config.TRAIN.END_LR, - config.TRAIN.NUM_EPOCHS, - len(dataloader_train), - warmup_epochs=config.TRAIN.WARMUP_EPOCHS) - - #params_groups = get_params_groups(model) - params_groups = lr_decay.param_groups_lrd( - model=model._layers, # TODO: check correctness - weight_decay=config.TRAIN.WEIGHT_DECAY, - layer_decay=config.TRAIN.LAYER_DECAY) - - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None - - if config.TRAIN.OPTIMIZER.NAME == "SGD": - optimizer = paddle.optimizer.Momentum( - parameters=params_groups, - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, + if config.TRAIN.ACCUM_ITER > 1: + linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER + #linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER + #linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER + + config.TRAIN.BASE_LR = linear_scaled_lr + #config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr + #config.TRAIN.END_LR = linear_scaled_end_lr + + lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale + config.TRAIN.END_LR, + config.TRAIN.NUM_EPOCHS, + len(dataloader_train), + warmup_epochs=config.TRAIN.WARMUP_EPOCHS) + + #params_groups = get_params_groups(model) + params_groups = lr_decay.param_groups_lrd( + model=model, weight_decay=config.TRAIN.WEIGHT_DECAY, - momentum=config.TRAIN.OPTIMIZER.MOMENTUM, - grad_clip=clip) - elif config.TRAIN.OPTIMIZER.NAME == "AdamW": - optimizer = 
paddle.optimizer.AdamW( - parameters=params_groups, - learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, - beta1=config.TRAIN.OPTIMIZER.BETAS[0], - beta2=config.TRAIN.OPTIMIZER.BETAS[1], - weight_decay=1.0, #config.TRAIN.WEIGHT_DECAY, - epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip) - else: - message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." - write_log(local_logger, master_logger, message, None, 'fatal') - raise NotImplementedError(message) + layer_decay=config.TRAIN.LAYER_DECAY) + + if config.TRAIN.GRAD_CLIP: + clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) + else: + clip = None + + if config.TRAIN.OPTIMIZER.NAME == "SGD": + optimizer = paddle.optimizer.Momentum( + parameters=params_groups, + learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, + weight_decay=0.0, #config.TRAIN.WEIGHT_DECAY, set by params_groups + momentum=config.TRAIN.OPTIMIZER.MOMENTUM, + grad_clip=clip) + elif config.TRAIN.OPTIMIZER.NAME == "AdamW": + optimizer = paddle.optimizer.AdamW( + parameters=params_groups, + learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + beta1=config.TRAIN.OPTIMIZER.BETAS[0], + beta2=config.TRAIN.OPTIMIZER.BETAS[1], + weight_decay=0.0, #config.TRAIN.WEIGHT_DECAY, set by params_groups + epsilon=config.TRAIN.OPTIMIZER.EPS, + grad_clip=clip) + else: + message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." + write_log(local_logger, master_logger, message, None, 'fatal') + raise NotImplementedError(message) # STEP 5: Load pretrained model / load resumt model and optimizer states if config.MODEL.PRETRAINED: @@ -439,7 +438,7 @@ def main_worker(*args): model_state = paddle.load(config.MODEL.PRETRAINED + '.pdparams') if not config.EVAL: - keys = ['encoder.norm.weight', 'encoder.norm.bias', + keys = ['encoder_norm.weight', 'encoder_norm.bias', 'classfier.weight', 'classifier.bias'] if config.MODEL.GLOBAL_POOL: del model_state[keys[0]] @@ -449,7 +448,7 @@ def main_worker(*args): interpolate_pos_embed(model, model_state) - model.set_dict(model_state) + model.set_state_dict(model_state) message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" write_log(local_logger, master_logger, message) @@ -463,6 +462,9 @@ def main_worker(*args): message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}" write_log(local_logger, master_logger, message) + # enable data parallel for distributed + model = paddle.DataParallel(model) + # STEP 6: Validation (eval mode) if config.EVAL: write_log(local_logger, master_logger, f"----- Start Validation") @@ -470,23 +472,24 @@ def main_worker(*args): dataloader=dataloader_val, model=model, criterion=criterion_val, - total_batch=total_batch_train, + total_batch=total_batch_val, debug_steps=config.REPORT_FREQ, local_logger=local_logger, master_logger=master_logger) - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + local_message = (f"----- Validation: " + f"Validation Loss: {val_loss:.4f}, " + f"Validation Acc@1: {val_acc1:.4f}, " + f"Validation Acc@1: {val_acc5:.4f}, " + f"time: {val_time:.2f}") - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + master_message = (f"----- Validation: " + f"Validation Loss: {avg_loss:.4f}, " + f"Validation Acc@1: {avg_acc1:.4f}, " + f"Validation Acc@1: {avg_acc5:.4f}, " + f"time: {val_time:.2f}") - + write_log(local_logger, master_logger, local_message, master_message) + return # STEP 7: Start 
training (train mode) @@ -540,7 +543,7 @@ def main_worker(*args): f"Validation Acc@1: {val_acc5:.4f}, " + f"time: {val_time:.2f}") - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + f"Validation Loss: {avg_loss:.4f}, " + f"Validation Acc@1: {avg_acc1:.4f}, " + f"Validation Acc@1: {avg_acc5:.4f}, " + diff --git a/image_classification/MAE/main_multi_gpu_linearprobe.py b/image_classification/MAE/main_multi_gpu_linearprobe.py index 96fb8283..ab4152b9 100644 --- a/image_classification/MAE/main_multi_gpu_linearprobe.py +++ b/image_classification/MAE/main_multi_gpu_linearprobe.py @@ -327,7 +327,6 @@ def main_worker(*args): # STEP 1: Create model model = build_model(config) - model = paddle.DataParallel(model) # STEP 2: Create train and val dataloader if not config.EVAL: @@ -350,56 +349,57 @@ def main_worker(*args): # STEP 4: Define optimizer and lr_scheduler # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) - if config.TRAIN.LINEAR_SCALED_LR is not None: - linear_scaled_lr = ( - config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - linear_scaled_warmup_start_lr = ( - config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - linear_scaled_end_lr = ( - config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - - if config.TRAIN.ACCUM_ITER > 1: - linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER - linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER - linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER + if not config.EVAL: + if config.TRAIN.LINEAR_SCALED_LR is not None: + linear_scaled_lr = ( + config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + #linear_scaled_warmup_start_lr = ( + # config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + #linear_scaled_end_lr = ( + # config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - config.TRAIN.BASE_LR = linear_scaled_lr - config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr - config.TRAIN.END_LR = linear_scaled_end_lr - - lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale - config.TRAIN.END_LR, - config.TRAIN.NUM_EPOCHS, - len(dataloader_train), - warmup_epochs=config.TRAIN.WARMUP_EPOCHS) - - params_groups = get_params_groups(model) - - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None - - if config.TRAIN.OPTIMIZER.NAME == "SGD": - optimizer = paddle.optimizer.Momentum( - parameters=params_groups, - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, - weight_decay=config.TRAIN.WEIGHT_DECAY, - momentum=config.TRAIN.OPTIMIZER.MOMENTUM, - grad_clip=clip) - elif config.TRAIN.OPTIMIZER.NAME == "AdamW": - optimizer = paddle.optimizer.AdamW( - parameters=params_groups, - learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, - beta1=config.TRAIN.OPTIMIZER.BETAS[0], - beta2=config.TRAIN.OPTIMIZER.BETAS[1], - weight_decay=config.TRAIN.WEIGHT_DECAY, - epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip) - else: - message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." 
- write_log(local_logger, master_logger, message, None, 'fatal') - raise NotImplementedError(message) + if config.TRAIN.ACCUM_ITER > 1: + linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER + #linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER + #linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER + + config.TRAIN.BASE_LR = linear_scaled_lr + #config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr + #config.TRAIN.END_LR = linear_scaled_end_lr + + lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale + config.TRAIN.END_LR, + config.TRAIN.NUM_EPOCHS, + len(dataloader_train), + warmup_epochs=config.TRAIN.WARMUP_EPOCHS) + + params_groups = get_params_groups(model) + + if config.TRAIN.GRAD_CLIP: + clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) + else: + clip = None + + if config.TRAIN.OPTIMIZER.NAME == "SGD": + optimizer = paddle.optimizer.Momentum( + parameters=params_groups, + learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, + weight_decay=config.TRAIN.WEIGHT_DECAY, + momentum=config.TRAIN.OPTIMIZER.MOMENTUM, + grad_clip=clip) + elif config.TRAIN.OPTIMIZER.NAME == "AdamW": + optimizer = paddle.optimizer.AdamW( + parameters=params_groups, + learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + beta1=config.TRAIN.OPTIMIZER.BETAS[0], + beta2=config.TRAIN.OPTIMIZER.BETAS[1], + weight_decay=config.TRAIN.WEIGHT_DECAY, + epsilon=config.TRAIN.OPTIMIZER.EPS, + grad_clip=clip) + else: + message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." + write_log(local_logger, master_logger, message, None, 'fatal') + raise NotImplementedError(message) # STEP 5: Load pretrained model / load resumt model and optimizer states if config.MODEL.PRETRAINED: @@ -416,19 +416,17 @@ def main_worker(*args): # interpolate position embedding interpolate_pos_embed(model, model_state) - model.set_dict(model_state) + model.set_state_dict(model_state) message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" write_log(local_logger, master_logger, message) # for linearprobing - model._layers.classifier = nn.Sequential( - nn.BatchNorm1D(model._layers.classifier.weight.shape[0], weight_attr=False, epsilon=1e-6), - model._layers.classifier) + model.classifier = nn.Sequential( + nn.BatchNorm1D(model.classifier.weight.shape[0], weight_attr=False, epsilon=1e-6), + model.classifier) # freeze all but the classifier for _, p in model.named_parameters(): p.stop_gradient = True - for _, p in model._layers.classifier.named_parameters(): - p.stop_gradient = False if config.MODEL.RESUME: assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True @@ -440,6 +438,9 @@ def main_worker(*args): message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}" write_log(local_logger, master_logger, message) + # enable data paralle for distributed + model = paddle.DataParallel(model) + # STEP 6: Validation (eval mode) if config.EVAL: write_log(local_logger, master_logger, f"----- Start Validation") @@ -449,22 +450,23 @@ def main_worker(*args): criterion=criterion_val, total_batch=total_batch_train, debug_steps=config.REPORT_FREQ, + amp=config.AMP, local_logger=local_logger, master_logger=master_logger) - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + local_message = (f"----- Validation: " + f"Validation Loss: {val_loss:.4f}, " + f"Validation Acc@1: {val_acc1:.4f}, " + f"Validation Acc@1: {val_acc5:.4f}, " 
+ f"time: {val_time:.2f}") - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + master_message = (f"----- Validation: " + f"Validation Loss: {avg_loss:.4f}, " + f"Validation Acc@1: {avg_acc1:.4f}, " + f"Validation Acc@1: {avg_acc5:.4f}, " + f"time: {val_time:.2f}") - - + write_log(local_logger, master_logger, local_message, master_message) + return # STEP 7: Start training (train mode) write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.") @@ -499,7 +501,7 @@ def main_worker(*args): write_log(local_logger, master_logger, local_message, master_message) # validation - if epoch % config.VALIDATION_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: + if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}') val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( dataloader=dataloader_val, @@ -516,7 +518,7 @@ def main_worker(*args): f"Validation Acc@1: {val_acc5:.4f}, " + f"time: {val_time:.2f}") - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + f"Validation Loss: {avg_loss:.4f}, " + f"Validation Acc@1: {avg_acc1:.4f}, " + f"Validation Acc@1: {avg_acc5:.4f}, " + diff --git a/image_classification/MAE/main_multi_gpu_pretrain.py b/image_classification/MAE/main_multi_gpu_pretrain.py index e70af1d0..9d829595 100644 --- a/image_classification/MAE/main_multi_gpu_pretrain.py +++ b/image_classification/MAE/main_multi_gpu_pretrain.py @@ -231,19 +231,19 @@ def main_worker(*args): if config.TRAIN.LINEAR_SCALED_LR is not None: linear_scaled_lr = ( config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - linear_scaled_warmup_start_lr = ( - config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - linear_scaled_end_lr = ( - config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + #linear_scaled_warmup_start_lr = ( + # config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + #linear_scaled_end_lr = ( + # config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR if config.TRAIN.ACCUM_ITER > 1: linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER - linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER - linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER + #linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER + #linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER config.TRAIN.BASE_LR = linear_scaled_lr - config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr - config.TRAIN.END_LR = linear_scaled_end_lr + #config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr + #config.TRAIN.END_LR = linear_scaled_end_lr lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale config.TRAIN.END_LR, diff --git a/image_classification/MAE/run.sh b/image_classification/MAE/run.sh new file mode 100644 index 00000000..b679ef28 --- /dev/null +++ b/image_classification/MAE/run.sh @@ -0,0 +1,26 @@ +#!/bin/bash +cur_time=`date +"%Y%m%d%H%M"` +job_name=ppvit_job${cur_time} +#group_name="idl-40g-0-yq01-k8s-gpu-a100-8" +group_name="idl-32g-1-yq01-k8s-gpu-v100-8" +job_version="paddle-fluid-custom" 
+image_addr="iregistry.baidu-int.com/idl/pytorch1.7.1:ubuntu16.04-cuda10.1_cudnn7_paddle_dev_0211" +start_cmd="sh run_pretrain_multi_vit_l_pdc.sh" +k8s_gpu_cards=8 +wall_time="00:00:00" +k8s_priority="high" +file_dir="." + + +paddlecloud job --ak 560ffe9013d3592e8119e5c7e811e796 --sk 19b5ecd6b8305d81ae27d596a7f2fe22 \ + train --job-name ${job_name} \ + --job-conf config.ini \ + --group-name ${group_name} \ + --start-cmd "${start_cmd}" \ + --file-dir ${file_dir} \ + --job-version ${job_version} \ + --k8s-gpu-cards ${k8s_gpu_cards} \ + --k8s-priority ${k8s_priority} \ + --wall-time ${wall_time} \ + --image-addr ${image_addr} \ + --is-standalone 1 diff --git a/image_classification/MAE/run_finetune_multi.sh b/image_classification/MAE/run_finetune_multi.sh index 7d369a54..283a448d 100644 --- a/image_classification/MAE/run_finetune_multi.sh +++ b/image_classification/MAE/run_finetune_multi.sh @@ -1,8 +1,9 @@ -CUDA_VISIBLE_DEVICES=0,1 \ +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ python main_multi_gpu_finetune.py \ -cfg='./configs/vit_base_patch16_224_finetune.yaml' \ -dataset='imagenet2012' \ --batch_size=2 \ +-batch_size=32 \ -data_path='/dataset/imagenet' \ +-pretrained='./mae_pretrain_vit_base' \ -amp \ --pretrained='./output/train-20220125-17-48-06/PRETRAIN-Epoch-99-Loss-0.5566961133140487' +#-eval diff --git a/image_classification/MAE/run_pretrain_multi.sh b/image_classification/MAE/run_pretrain_multi.sh index 940fa6dd..ec9bbe03 100644 --- a/image_classification/MAE/run_pretrain_multi.sh +++ b/image_classification/MAE/run_pretrain_multi.sh @@ -1,7 +1,7 @@ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ python main_multi_gpu_pretrain.py \ --cfg='./configs/vit_base_patch16_224_pretrain.yaml' \ +-cfg='./configs/vit_base_patch16_224_pretrain_dec1.yaml' \ -dataset='imagenet2012' \ --batch_size=256 \ +-batch_size=8 \ -data_path='/dataset/imagenet' \ -amp diff --git a/image_classification/MAE/run_pretrain_multi_resume.sh b/image_classification/MAE/run_pretrain_multi_resume.sh new file mode 100644 index 00000000..5eb4293c --- /dev/null +++ b/image_classification/MAE/run_pretrain_multi_resume.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ +python main_multi_gpu_pretrain.py \ +-cfg='./configs/vit_base_patch16_224_pretrain_dec1.yaml' \ +-dataset='imagenet2012' \ +-batch_size=256 \ +-data_path='/dataset/imagenet' \ +-resume='./output/train-20220125-17-48-06/PRETRAIN-Epoch-99-Loss-0.5566961133140487' \ +-last_epoch=99 \ +-amp diff --git a/image_classification/MAE/stat_define.py b/image_classification/MAE/stat_define.py new file mode 100644 index 00000000..ffca7595 --- /dev/null +++ b/image_classification/MAE/stat_define.py @@ -0,0 +1,61 @@ +import os +import glob +import paddle +from config import get_config +from transformer import build_mae_pretrain as build_model + +def count_gelu(layer, input, output): + activation_flops = 8 + x = input[0] + num = x.numel() + layer.total_ops += num * activation_flops + + +def count_softmax(layer, input, output): + softmax_flops = 5 # max/substract, exp, sum, divide + x = input[0] + num = x.numel() + layer.total_ops += num * softmax_flops + + +def count_layernorm(layer, input, output): + layer_norm_flops = 5 # get mean (sum), get variance (square and sum), scale(multiply) + x = input[0] + num = x.numel() + layer.total_ops += num * layer_norm_flops + + +cfg = './configs/vit_large_patch32_384.yaml' +#input_size = (1, 3, 224, 224) +input_size = (1, 3, 384, 384) +config = get_config(cfg) +model = build_model(config) + +custom_ops = {paddle.nn.GELU: count_gelu, + 
paddle.nn.LayerNorm: count_layernorm, + paddle.nn.Softmax: count_softmax, + } +print(os.path.basename(cfg)) +paddle.flops(model, + input_size=input_size, + custom_ops=custom_ops, + print_detail=False) + + +#for cfg in glob.glob('./configs/*.yaml'): +# #cfg = './configs/swin_base_patch4_window7_224.yaml' +# input_size = (1, 3, int(cfg[-8:-5]), int(cfg[-8:-5])) +# config = get_config(cfg) +# model = build_model(config) +# +# +# custom_ops = {paddle.nn.GELU: count_gelu, +# paddle.nn.LayerNorm: count_layernorm, +# paddle.nn.Softmax: count_softmax, +# } +# print(os.path.basename(cfg)) +# paddle.flops(model, +# input_size=input_size, +# custom_ops=custom_ops, +# print_detail=False) +# print('-----------') diff --git a/image_classification/MAE/transformer.py b/image_classification/MAE/transformer.py index 0fadf67f..f1fadc87 100644 --- a/image_classification/MAE/transformer.py +++ b/image_classification/MAE/transformer.py @@ -131,7 +131,8 @@ def __init__(self, def _init_weights(self): weight_attr = paddle.ParamAttr( - initializer=nn.initializer.TruncatedNormal(std=.02)) + #initializer=nn.initializer.TruncatedNormal(std=.02)) + initializer=nn.initializer.XavierUniform()) # MAE bias_attr = paddle.ParamAttr( initializer=nn.initializer.Constant(0.0)) return weight_attr, bias_attr @@ -195,7 +196,8 @@ def __init__(self, def _init_weights(self): weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) + #initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) + initializer=paddle.nn.initializer.XavierUniform()) # MAE bias_attr = paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0)) return weight_attr, bias_attr @@ -432,11 +434,16 @@ def __init__(self, self.cls_token = paddle.create_parameter( shape=[1, 1, encoder_embed_dim], dtype='float32', - default_initializer=paddle.nn.initializer.Constant(0)) + default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) #MAE - self.encoder_position_embedding = get_position_encoding( - seq_len=1 + self.num_patches, - embed_dim=encoder_embed_dim) + self.encoder_position_embedding = paddle.create_parameter( + shape=[1, 1 + self.num_patches, encoder_embed_dim], + dtype='float32', + default_initializer=paddle.nn.initializer.Assign( + get_position_encoding(seq_len=1 + self.num_patches, + embed_dim=encoder_embed_dim) + ) + ) self.encoder = Encoder( encoder_embed_dim, @@ -460,11 +467,16 @@ def __init__(self, self.mask_token = paddle.create_parameter( shape=[1, 1, decoder_embed_dim], dtype='float32', - default_initializer=paddle.nn.initializer.Constant(0)) + default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) #MAE - self.decoder_position_embedding = get_position_encoding( - seq_len=1 + self.num_patches, - embed_dim=decoder_embed_dim) + self.decoder_position_embedding = paddle.create_parameter( + shape=[1, 1 + self.num_patches, decoder_embed_dim], + dtype='float32', + default_initializer=paddle.nn.initializer.Assign( + get_position_encoding(seq_len=1 + self.num_patches, + embed_dim=decoder_embed_dim) + ) + ) self.decoder = Decoder( decoder_embed_dim, @@ -488,9 +500,10 @@ def __init__(self, def _init_weights(self): weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) + #initializer=nn.initializer.TruncatedNormal(std=.02)) + initializer=nn.initializer.XavierUniform()) # MAE bias_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0)) + initializer=nn.initializer.Constant(0.0)) return weight_attr, bias_attr def patchify(self, images): @@ 
-640,9 +653,14 @@ def __init__(self, embed_dim, dropout) # create positional embedding - self.position_embedding = get_position_encoding( - seq_len=1 + self.patch_embedding.n_patches, - embed_dim=embed_dim) + self.position_embedding = paddle.create_parameter( + shape=[1, 1 + self.patch_embedding.n_patches, embed_dim], + dtype='float32', + default_initializer=paddle.nn.initializer.Assign( + get_position_encoding(seq_len=1 + self.patch_embedding.n_patches, + embed_dim=embed_dim) + ) + ) # create class token self.cls_token = paddle.create_parameter( shape=[1, 1, embed_dim], @@ -701,7 +719,8 @@ def _init_weights_norm(self): return weight_attr, bias_attr def _init_weights_linear(self): - weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) + #weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) + weight_attr = paddle.ParamAttr(initializer=nn.initializer.XavierUniform()) # MAE bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0)) return weight_attr, bias_attr From b77d9ee039a95a300fbd3bc100f3c34b85cd8877 Mon Sep 17 00:00:00 2001 From: xperzy Date: Wed, 16 Feb 2022 18:39:29 +0800 Subject: [PATCH 06/12] update code and fix bugs --- image_classification/MAE/config.py | 4 +-- ...base_patch16_224_finetune_single_node.yaml | 1 - .../MAE/main_multi_gpu_finetune.py | 2 +- .../MAE/main_multi_gpu_linearprobe.py | 12 ++++----- .../MAE/main_multi_gpu_pretrain.py | 2 +- image_classification/MAE/run.sh | 26 ------------------- .../MAE/run_finetune_multi.sh | 2 +- .../MAE/run_linear_probe_multi.sh | 6 ++--- image_classification/MAE/stat_define.py | 21 ++++++++------- 9 files changed, 25 insertions(+), 51 deletions(-) delete mode 100644 image_classification/MAE/run.sh diff --git a/image_classification/MAE/config.py b/image_classification/MAE/config.py index c3a4a787..21b64308 100644 --- a/image_classification/MAE/config.py +++ b/image_classification/MAE/config.py @@ -118,9 +118,9 @@ # misc _C.SAVE = "./output" _C.TAG = "default" -_C.SAVE_FREQ = 20 # freq to save chpt +_C.SAVE_FREQ = 10 # freq to save chpt _C.REPORT_FREQ = 100 # freq to logging info -_C.VALIDATE_FREQ = 100 # freq to do validation +_C.VALIDATE_FREQ = 10 # freq to do validation _C.SEED = 0 _C.EVAL = False # run evaluation only _C.AMP = False # mix precision training diff --git a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml index e3dbb6c7..3a357b37 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml @@ -23,7 +23,6 @@ TRAIN: WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 END_LR: 1e-6 - ACCUM_ITER: 1 OPTIMIZER: NAME: 'AdamW' BETAS: (0.9, 0.999) diff --git a/image_classification/MAE/main_multi_gpu_finetune.py b/image_classification/MAE/main_multi_gpu_finetune.py index c5cb7647..0ea4b45a 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/image_classification/MAE/main_multi_gpu_finetune.py @@ -171,7 +171,7 @@ def train(dataloader, else: scaled = scaler.scale(loss) scaled.backward() - # TODO: check if manually unscale and clip grad is required here + # todo: check if manually unscale and clip grad is required here if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 scaler.step(optimizer) diff 
--git a/image_classification/MAE/main_multi_gpu_linearprobe.py b/image_classification/MAE/main_multi_gpu_linearprobe.py index ab4152b9..e2ecb4d1 100644 --- a/image_classification/MAE/main_multi_gpu_linearprobe.py +++ b/image_classification/MAE/main_multi_gpu_linearprobe.py @@ -163,7 +163,7 @@ def train(dataloader, else: scaled = scaler.scale(loss) scaled.backward() - # TODO: check if manually unscale and clip grad is required here + # todo: check if manually unscale and clip grad is required here if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 scaler.step(optimizer) @@ -211,7 +211,7 @@ def train(dataloader, @paddle.no_grad() def validate(dataloader, model, - optimizer, + criterion, total_batch, debug_steps=100, local_logger=None, @@ -241,8 +241,6 @@ def validate(dataloader, master_acc1_meter = AverageMeter() master_acc5_meter = AverageMeter() - if amp is True: - scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 time_st = time.time() for batch_id, data in enumerate(dataloader): @@ -250,7 +248,7 @@ def validate(dataloader, images = data[0] label = data[1] - output = model(image) + output = model(images) loss = criterion(output, label) pred = F.softmax(output) @@ -427,6 +425,9 @@ def main_worker(*args): # freeze all but the classifier for _, p in model.named_parameters(): p.stop_gradient = True + # set classifier trainable + for _, p in model.classifier.named_parameters(): + p.stop_gradient = False if config.MODEL.RESUME: assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True @@ -450,7 +451,6 @@ def main_worker(*args): criterion=criterion_val, total_batch=total_batch_train, debug_steps=config.REPORT_FREQ, - amp=config.AMP, local_logger=local_logger, master_logger=master_logger) diff --git a/image_classification/MAE/main_multi_gpu_pretrain.py b/image_classification/MAE/main_multi_gpu_pretrain.py index 9d829595..02bd5f5a 100644 --- a/image_classification/MAE/main_multi_gpu_pretrain.py +++ b/image_classification/MAE/main_multi_gpu_pretrain.py @@ -153,7 +153,7 @@ def train(dataloader, else: scaled = scaler.scale(loss) scaled.backward() - # TODO: check if manually unscale and clip grad is required here + # todo: check if manually unscale and clip grad is required here if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 scaler.step(optimizer) diff --git a/image_classification/MAE/run.sh b/image_classification/MAE/run.sh deleted file mode 100644 index b679ef28..00000000 --- a/image_classification/MAE/run.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -cur_time=`date +"%Y%m%d%H%M"` -job_name=ppvit_job${cur_time} -#group_name="idl-40g-0-yq01-k8s-gpu-a100-8" -group_name="idl-32g-1-yq01-k8s-gpu-v100-8" -job_version="paddle-fluid-custom" -image_addr="iregistry.baidu-int.com/idl/pytorch1.7.1:ubuntu16.04-cuda10.1_cudnn7_paddle_dev_0211" -start_cmd="sh run_pretrain_multi_vit_l_pdc.sh" -k8s_gpu_cards=8 -wall_time="00:00:00" -k8s_priority="high" -file_dir="." 
- - -paddlecloud job --ak 560ffe9013d3592e8119e5c7e811e796 --sk 19b5ecd6b8305d81ae27d596a7f2fe22 \ - train --job-name ${job_name} \ - --job-conf config.ini \ - --group-name ${group_name} \ - --start-cmd "${start_cmd}" \ - --file-dir ${file_dir} \ - --job-version ${job_version} \ - --k8s-gpu-cards ${k8s_gpu_cards} \ - --k8s-priority ${k8s_priority} \ - --wall-time ${wall_time} \ - --image-addr ${image_addr} \ - --is-standalone 1 diff --git a/image_classification/MAE/run_finetune_multi.sh b/image_classification/MAE/run_finetune_multi.sh index 283a448d..174cd949 100644 --- a/image_classification/MAE/run_finetune_multi.sh +++ b/image_classification/MAE/run_finetune_multi.sh @@ -1,6 +1,6 @@ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ python main_multi_gpu_finetune.py \ --cfg='./configs/vit_base_patch16_224_finetune.yaml' \ +-cfg='./configs/vit_base_patch16_224_finetune_single_node.yaml' \ -dataset='imagenet2012' \ -batch_size=32 \ -data_path='/dataset/imagenet' \ diff --git a/image_classification/MAE/run_linear_probe_multi.sh b/image_classification/MAE/run_linear_probe_multi.sh index 5d8ffd72..2400bb3c 100644 --- a/image_classification/MAE/run_linear_probe_multi.sh +++ b/image_classification/MAE/run_linear_probe_multi.sh @@ -1,8 +1,8 @@ -CUDA_VISIBLE_DEVICES=0,1 \ +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ python main_multi_gpu_linearprobe.py \ -cfg='./configs/vit_base_patch16_224_linearprobe.yaml' \ -dataset='imagenet2012' \ --batch_size=2 \ +-batch_size=512 \ -data_path='/dataset/imagenet' \ -amp \ --pretrained='./output/train-20220125-17-48-06/PRETRAIN-Epoch-99-Loss-0.5566961133140487' +-pretrained='./mae_pretrain_vit_base' diff --git a/image_classification/MAE/stat_define.py b/image_classification/MAE/stat_define.py index ffca7595..207cef5f 100644 --- a/image_classification/MAE/stat_define.py +++ b/image_classification/MAE/stat_define.py @@ -2,32 +2,33 @@ import glob import paddle from config import get_config -from transformer import build_mae_pretrain as build_model +from transformer import build_transformer as build_model +#from transformer import build_mae_pretrain as build_model -def count_gelu(layer, input, output): +def count_gelu(layer, inputs, output): activation_flops = 8 - x = input[0] + x = inputs[0] num = x.numel() layer.total_ops += num * activation_flops -def count_softmax(layer, input, output): +def count_softmax(layer, inputs, output): softmax_flops = 5 # max/substract, exp, sum, divide - x = input[0] + x = inputs[0] num = x.numel() layer.total_ops += num * softmax_flops -def count_layernorm(layer, input, output): +def count_layernorm(layer, inputs, output): layer_norm_flops = 5 # get mean (sum), get variance (square and sum), scale(multiply) - x = input[0] + x = inputs[0] num = x.numel() layer.total_ops += num * layer_norm_flops -cfg = './configs/vit_large_patch32_384.yaml' -#input_size = (1, 3, 224, 224) -input_size = (1, 3, 384, 384) +cfg = './configs/vit_large_patch16_224_finetune.yaml' +input_size = (1, 3, 224, 224) +#input_size = (1, 3, 384, 384) config = get_config(cfg) model = build_model(config) From b4e8f71c77f7b896b3ba9df510ef8a491a94e0be Mon Sep 17 00:00:00 2001 From: xperzy Date: Wed, 16 Feb 2022 18:41:24 +0800 Subject: [PATCH 07/12] update comments --- image_classification/MAE/main_multi_gpu_finetune.py | 1 - image_classification/MAE/main_multi_gpu_linearprobe.py | 1 - image_classification/MAE/main_multi_gpu_pretrain.py | 1 - 3 files changed, 3 deletions(-) diff --git a/image_classification/MAE/main_multi_gpu_finetune.py 
b/image_classification/MAE/main_multi_gpu_finetune.py index 0ea4b45a..a08b9083 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/image_classification/MAE/main_multi_gpu_finetune.py @@ -171,7 +171,6 @@ def train(dataloader, else: scaled = scaler.scale(loss) scaled.backward() - # todo: check if manually unscale and clip grad is required here if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 scaler.step(optimizer) diff --git a/image_classification/MAE/main_multi_gpu_linearprobe.py b/image_classification/MAE/main_multi_gpu_linearprobe.py index e2ecb4d1..e7f2cfa1 100644 --- a/image_classification/MAE/main_multi_gpu_linearprobe.py +++ b/image_classification/MAE/main_multi_gpu_linearprobe.py @@ -163,7 +163,6 @@ def train(dataloader, else: scaled = scaler.scale(loss) scaled.backward() - # todo: check if manually unscale and clip grad is required here if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 scaler.step(optimizer) diff --git a/image_classification/MAE/main_multi_gpu_pretrain.py b/image_classification/MAE/main_multi_gpu_pretrain.py index 02bd5f5a..7e7aa134 100644 --- a/image_classification/MAE/main_multi_gpu_pretrain.py +++ b/image_classification/MAE/main_multi_gpu_pretrain.py @@ -153,7 +153,6 @@ def train(dataloader, else: scaled = scaler.scale(loss) scaled.backward() - # todo: check if manually unscale and clip grad is required here if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 scaler.step(optimizer) From 7d2398a710b522f3a5195c484c9407eefec72507 Mon Sep 17 00:00:00 2001 From: xperzy Date: Wed, 2 Mar 2022 17:27:41 +0800 Subject: [PATCH 08/12] fix bugs for finetuning --- image_classification/MAE/README.md | 10 + image_classification/MAE/augment.py | 426 +++++++++---- image_classification/MAE/config.py | 60 +- .../vit_base_patch16_224_finetune.yaml | 11 +- ...base_patch16_224_finetune_single_node.yaml | 11 +- .../vit_base_patch16_224_linearprobe.yaml | 21 +- .../vit_base_patch16_224_pretrain.yaml | 3 +- .../vit_base_patch16_224_pretrain_dec1.yaml | 36 -- .../vit_huge_patch14_224_finetune.yaml | 15 +- .../vit_huge_patch14_224_linearprobe.yaml | 21 +- .../vit_huge_patch14_224_pretrain.yaml | 3 +- .../vit_large_patch16_224_finetune.yaml | 11 +- .../vit_large_patch16_224_linearprobe.yaml | 23 +- .../vit_large_patch16_224_pretrain.yaml | 4 +- image_classification/MAE/datasets.py | 55 +- .../MAE/load_pytorch_weights.py | 215 +++++++ .../MAE/load_pytorch_weights_finetune.py | 188 ++++++ image_classification/MAE/lr_decay.py | 44 +- .../MAE/main_multi_gpu_finetune.py | 301 ++++++---- .../MAE/main_multi_gpu_linearprobe.py | 563 ------------------ .../MAE/main_multi_gpu_pretrain.py | 361 ----------- image_classification/MAE/pos_embed.py | 56 ++ .../MAE/run_finetune_multi.sh | 9 - .../MAE/run_finetune_vit_b.sh | 9 + .../MAE/run_finetune_vit_b_single_node.sh | 9 + .../MAE/run_linear_probe_multi.sh | 8 - .../MAE/run_pretrain_multi.sh | 7 - .../MAE/run_pretrain_multi_resume.sh | 9 - image_classification/MAE/stat_define.py | 62 -- image_classification/MAE/transformer.py | 57 +- image_classification/MAE/utils.py | 29 +- 31 files changed, 1171 insertions(+), 1466 deletions(-) delete mode 100644 
image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml create mode 100644 image_classification/MAE/load_pytorch_weights.py create mode 100644 image_classification/MAE/load_pytorch_weights_finetune.py delete mode 100644 image_classification/MAE/main_multi_gpu_linearprobe.py delete mode 100644 image_classification/MAE/main_multi_gpu_pretrain.py create mode 100644 image_classification/MAE/pos_embed.py delete mode 100644 image_classification/MAE/run_finetune_multi.sh create mode 100644 image_classification/MAE/run_finetune_vit_b.sh create mode 100644 image_classification/MAE/run_finetune_vit_b_single_node.sh delete mode 100644 image_classification/MAE/run_linear_probe_multi.sh delete mode 100644 image_classification/MAE/run_pretrain_multi.sh delete mode 100644 image_classification/MAE/run_pretrain_multi_resume.sh delete mode 100644 image_classification/MAE/stat_define.py diff --git a/image_classification/MAE/README.md b/image_classification/MAE/README.md index 98bf486a..769a22cd 100644 --- a/image_classification/MAE/README.md +++ b/image_classification/MAE/README.md @@ -14,9 +14,19 @@ This implementation is developed by [PaddleViT](https://github.com/BR-IDL/Paddle ### Update +- Update (2022-03-02): Code is refactored and bugs are fixed. - Update (2022-02-15): Code is refactored and ported weights are uploaded. - Update (2021-12-13): Code is released. +## Note: +Current Version requires extra packages installed: `paddlenlp`. +You can use the following command to install paddlenlp: +```shell +pip install paddlenlp +``` +> Note: the reason to use paddlenlp is we found the AdamW in paddle cannot handle layer wise decay properly, instead the paddlenlp.ops.optimizer.AdamWLD works well in our case, so we import this op for temp fix. + + ## Models Zoo | Finetuned Model | Acc@1 | Acc@5 | #Params | FLOPs | Image Size | Crop_pct | Interpolation | Link | |-------------------------------|-------|-------|---------|--------|------------|----------|---------------|--------------| diff --git a/image_classification/MAE/augment.py b/image_classification/MAE/augment.py index 7a7f081c..b6ffbe12 100644 --- a/image_classification/MAE/augment.py +++ b/image_classification/MAE/augment.py @@ -21,23 +21,98 @@ # https://arxiv.org/abs/1805.09501 import random +import math import numpy as np -from PIL import Image, ImageEnhance, ImageOps +from PIL import Image, ImageEnhance, ImageOps, ImageChops +import PIL +LEVEL_DENOM = 10 -def auto_augment_policy_original(): - """25 types of augment policies in original paper""" +#NOTE: fill color is set to 128 instead fo image mean + +def auto_augment_policy_v0(hparams): + """policy v0: hack from timm""" + # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. 
+ policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + policy = [[SubPolicy(*args) for args in subpolicy] for subpolicy in policy] + return policy + + +def auto_augment_policy_v0r(hparams): + """policy v0r: hack from timm""" + # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used + # in Google research implementation (number of bits discarded increases with magnitude) + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + policy = [[SubPolicy(*args) for args in subpolicy] for subpolicy in policy] + return policy + + +def auto_augment_policy_originalr(hparams): + """policy originalr: hack from timm""" + # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation policy = [ - [('Posterize', 0.4, 8), ('Rotate', 0.6, 9)], + [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], - [('Posterize', 0.6, 7), ('Posterize', 0.6, 6)], + [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], - [('Posterize', 0.8, 5), 
('Equalize', 1.0, 2)], + [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], - [('Equalize', 0.6, 8), ('Posterize', 0.4, 6)], + [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], [('Rotate', 0.8, 8), ('Color', 0.4, 0)], [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], @@ -58,22 +133,36 @@ def auto_augment_policy_original(): return policy -def rand_augment_policy_original(magnitude_idx=9): - """ - 14 types of augment policies in original paper - Args: - magnitude_idx: M - """ +def auto_augment_policy_original(): + """25 types of augment policies in original paper""" policy = [ - ('Posterize', 1, magnitude_idx), ('Rotate', 1, magnitude_idx), - ('Solarize', 1, magnitude_idx), ('AutoContrast', 1, magnitude_idx), - ('Equalize', 1, magnitude_idx), ('Contrast', 1, magnitude_idx), - ('Color', 1, magnitude_idx), ('Invert', 1, magnitude_idx), - ('Sharpness', 1, magnitude_idx), ('Brightness', 1, magnitude_idx), - ('ShearX', 1, magnitude_idx), ('ShearY', 1, magnitude_idx), - ('TranslateX', 1, magnitude_idx), ('TranslateY', 1, magnitude_idx), + [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], ] - policy = [SubPolicy(*args) for args in policy] + policy = [[SubPolicy(*args) for args in subpolicy] for subpolicy in policy] return policy @@ -101,6 +190,33 @@ def __call__(self, image, policy_idx=None): return image +def rand_augment_policy_increasing(prob=0.5, magnitude_idx=9, magnitude_std=0.5): + """ + Rand augment policy: default rand-m9-mstd0.5-inc1 + """ + policy = [ + ('AutoContrast', prob, magnitude_idx, magnitude_std), + ('Equalize', prob, magnitude_idx, magnitude_std), + ('Invert', prob, magnitude_idx, magnitude_std), + ('Rotate', prob, magnitude_idx, magnitude_std), + + ('PosterizeIncreasing', prob, magnitude_idx, magnitude_std), + ('SolarizeIncreasing', prob, magnitude_idx, magnitude_std), + ('SolarizeAdd', prob, magnitude_idx, magnitude_std), + ('ColorIncreasing', prob, magnitude_idx, magnitude_std), + ('ContrastIncreasing', prob, magnitude_idx, magnitude_std), + ('BrightnessIncreasing', prob, magnitude_idx, magnitude_std), + ('SharpnessIncreasing', prob, magnitude_idx, magnitude_std), + + ('ShearX', prob, magnitude_idx, magnitude_std), + ('ShearY', prob, magnitude_idx, magnitude_std), + ('TranslateX', prob, magnitude_idx, magnitude_std), + ('TranslateY', 
prob, magnitude_idx, magnitude_std), + ] + policy = [SubPolicy(*args) for args in policy] + return policy + + class RandAugment(): """Rand Augment Randomly choose N augment ops from a list of K policies @@ -136,112 +252,207 @@ class SubPolicy: Args: op_name: str, augment operation name prob: float, if prob > random prob, apply augment - magnitude_idx: int, index of magnitude in preset magnitude ranges + magnitude: int, index of magnitude in preset magnitude ranges + magnitude_std: float, std of magnitude in preset magnitude ranges """ - def __init__(self, op_name, prob, magnitude_idx): - # ranges of operations' magnitude - ranges = { - 'ShearX': np.linspace(0, 0.3, 10), # [-0.3, 0.3] (by random negative) - 'ShearY': np.linspace(0, 0.3, 10), # [-0.3, 0.3] (by random negative) - 'TranslateX': np.linspace(0, 150 / 331, 10), # [-0.45, 0.45] (by random negative) - 'TranslateY': np.linspace(0, 150 / 331, 10), # [-0.45, 0.45] (by random negative) - 'Rotate': np.linspace(0, 30, 10), # [-30, 30] (by random negative) - 'Color': np.linspace(0, 0.9, 10), # [-0.9, 0.9] (by random negative) - 'Posterize': np.round(np.linspace(8, 4, 10), 0).astype(np.int), # [0, 4] - 'Solarize': np.linspace(256, 0, 10), # [0, 256] - 'Contrast': np.linspace(0, 0.9, 10), # [-0.9, 0.9] (by random negative) - 'Sharpness': np.linspace(0, 0.9, 10), # [-0.9, 0.9] (by random negative) - 'Brightness': np.linspace(0, 0.9, 10), # [-0.9, 0.9] (by random negative) - 'AutoContrast': [0] * 10, # no range - 'Equalize': [0] * 10, # no range - 'Invert': [0] * 10, # no range - } - - # augmentation operations - # Lambda is not pickleable for DDP - # image_ops = { - # 'ShearX': lambda image, magnitude: shear_x(image, magnitude), - # 'ShearY': lambda image, magnitude: shear_y(image, magnitude), - # 'TranslateX': lambda image, magnitude: translate_x(image, magnitude), - # 'TranslateY': lambda image, magnitude: translate_y(image, magnitude), - # 'Rotate': lambda image, magnitude: rotate(image, magnitude), - # 'AutoContrast': lambda image, magnitude: auto_contrast(image, magnitude), - # 'Invert': lambda image, magnitude: invert(image, magnitude), - # 'Equalize': lambda image, magnitude: equalize(image, magnitude), - # 'Solarize': lambda image, magnitude: solarize(image, magnitude), - # 'Posterize': lambda image, magnitude: posterize(image, magnitude), - # 'Contrast': lambda image, magnitude: contrast(image, magnitude), - # 'Color': lambda image, magnitude: color(image, magnitude), - # 'Brightness': lambda image, magnitude: brightness(image, magnitude), - # 'Sharpness': lambda image, magnitude: sharpness(image, magnitude), - # } + def __init__(self, op_name, prob, magnitude, magnitude_std=0.5): image_ops = { 'ShearX': shear_x, 'ShearY': shear_y, - 'TranslateX': translate_x_relative, - 'TranslateY': translate_y_relative, + 'TranslateX': translate_x_absolute, + 'TranslateY': translate_y_absolute, + 'TranslateXRel': translate_x_relative, + 'TranslateYRel': translate_y_relative, 'Rotate': rotate, 'AutoContrast': auto_contrast, 'Invert': invert, 'Equalize': equalize, 'Solarize': solarize, + 'SolarizeIncreasing': solarize, + 'SolarizeAdd': solarize_add, 'Posterize': posterize, + 'PosterizeIncreasing': posterize, + 'PosterizeOriginal': posterize, 'Contrast': contrast, + 'ContrastIncreasing': contrast, 'Color': color, + 'ColorIncreasing': color, 'Brightness': brightness, + 'BrightnessIncreasing': brightness, 'Sharpness': sharpness, + 'SharpnessIncreasing': sharpness, + } + + level_fn = { + 'ShearX': shear_level_to_arg, + 'ShearY': shear_level_to_arg, + 
'TranslateX': translate_absolute_level_to_arg, + 'TranslateY': translate_absolute_level_to_arg, + 'TranslateXRel': translate_relative_level_to_arg, + 'TranslateYRel': translate_relative_level_to_arg, + 'Rotate': rotate_level_to_arg, + 'AutoContrast': None, + 'Invert': None, + 'Equalize': None, + 'Solarize': solarize_level_to_arg, + 'SolarizeIncreasing': solarize_increasing_level_to_arg, + 'SolarizeAdd': solarize_add_level_to_arg, + 'Posterize': posterize_level_to_arg, + 'PosterizeIncreasing': posterize_increasing_level_to_arg, + 'PosterizeOriginal': posterize_original_level_to_arg, + 'Contrast': enhance_level_to_arg, + 'ContrastIncreasing': enhance_increasing_level_to_arg, + 'Color': enhance_level_to_arg, + 'ColorIncreasing': enhance_increasing_level_to_arg, + 'Brightness': enhance_level_to_arg, + 'BrightnessIncreasing': enhance_increasing_level_to_arg, + 'Sharpness': enhance_level_to_arg, + 'SharpnessIncreasing': enhance_increasing_level_to_arg, } self.prob = prob - self.magnitude = ranges[op_name][magnitude_idx] + self.magnitude = magnitude + self.magnitude_std = magnitude_std + self.op = image_ops[op_name] + self.level_fn = level_fn[op_name] def __call__(self, image): - if self.prob > random.random(): - image = self.op(image, self.magnitude) + if self.prob < 1.0 and random.random() > self.prob: + return image + + magnitude = self.magnitude + # hack from timm auto_augment.py + if self.magnitude_std > 0: + if self.magnitude_std == float('inf'): + magnitude = random.uniform(0, magnitude) + elif self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + upper_bound = LEVEL_DENOM + magnitude = max(0, min(magnitude, upper_bound)) + level_args = self.level_fn(magnitude) if self.level_fn is not None else tuple() + image = self.op(image, *level_args) return image +################################################################# +# Convert level to Image op arguments +################################################################# +def randomly_negate(v): + """negate the value with 0.5 prob""" + return -v if random.random() > 0.5 else v + + +def shear_level_to_arg(level): + # range [-0.3, 0.3] + level = (level / LEVEL_DENOM) * 0.3 + level = randomly_negate(level) + return level, + + +def translate_absolute_level_to_arg(level): + # translate const = 100 + level = (level / LEVEL_DENOM) * 100. + level = randomly_negate(level) + return level, + + +def translate_relative_level_to_arg(level): + # range [-0.45, 0.45] + level = (level / LEVEL_DENOM) * 0.45 + level = randomly_negate(level) + return level, + + +def rotate_level_to_arg(level): + # range [-30, 30] + level = (level / LEVEL_DENOM) * 30. 
+ level = randomly_negate(level) + return level, + + +def solarize_level_to_arg(level): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return int((level / LEVEL_DENOM) * 256), + + +def solarize_increasing_level_to_arg(level): + # range [0, 256] + # intensity/severity of augmentation increases with level + return 256 - int((level / LEVEL_DENOM) * 256), + + +def solarize_add_level_to_arg(level): + # range [0, 110] + return int((level / LEVEL_DENOM) * 110), + + +def posterize_level_to_arg(level): + # range [0, 4] + # intensity/severity of augmentation decreases with level + return int((level / LEVEL_DENOM) * 4), + + +def posterize_increasing_level_to_arg(level): + # range [4, 0] + # intensity/severity of augmentation increases with level + return 4 - int((level / LEVEL_DENOM) * 4), + + +def posterize_original_level_to_arg(level): + # range [4, 8] + # intensity/severity of augmentation decreases with level + return int((level / LEVEL_DENOM) * 4) + 4, + + +# For Contrast, Color, Brightness, Sharpness +def enhance_level_to_arg(level): + # range [0.1, 1.9] + return (level / LEVEL_DENOM) * 1.8 + 0.1, + + +# For ContrastIncreasing, ColorIncreasing, BrightnessIncreasing, SharpnessIncreasing +def enhance_increasing_level_to_arg(level): + # range [0.1, 1.9] + level = (level / LEVEL_DENOM) * 0.9 + level = max(0.1, 1.0 + randomly_negate(level)) + return level, + + +################################################################# # PIL Image transforms # https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transform -def shear_x(image, magnitude, fillcolor=(128, 128, 128)): - factor = magnitude * random.choice([-1, 1]) # random negative +################################################################# +def shear_x(image, factor, fillcolor=(128, 128, 128)): return image.transform(image.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), fillcolor=fillcolor) -def shear_y(image, magnitude, fillcolor=(128, 128, 128)): - factor = magnitude * random.choice([-1, 1]) # random negative +def shear_y(image, factor, fillcolor=(128, 128, 128)): return image.transform(image.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), fillcolor=fillcolor) -def translate_x_relative(image, magnitude, fillcolor=(128, 128, 128)): - pixels = magnitude * image.size[0] - pixels = pixels * random.choice([-1, 1]) # random negative +def translate_x_absolute(image, pixels, fillcolor=(128, 128, 128)): return image.transform(image.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), fillcolor=fillcolor) -def translate_y_relative(image, magnitude, fillcolor=(128, 128, 128)): - pixels = magnitude * image.size[0] - pixels = pixels * random.choice([-1, 1]) # random negative +def translate_y_absolute(image, pixels, fillcolor=(128, 128, 128)): return image.transform(image.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), fillcolor=fillcolor) -def translate_x_absolute(image, magnitude, fillcolor=(128, 128, 128)): - magnitude = magnitude * random.choice([-1, 1]) # random negative - return image.transform(image.size, Image.AFFINE, (1, 0, magnitude, 0, 1, 0), fillcolor=fillcolor) +def translate_x_relative(image, pct, fillcolor=(128, 128, 128)): + pixels = pct * image.size[0] + return image.transform(image.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), fillcolor=fillcolor) -def translate_y_absolute(image, magnitude, fillcolor=(128, 128, 128)): - magnitude = magnitude * random.choice([-1, 1]) # random negative - return image.transform(image.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude), fillcolor=fillcolor) +def 
translate_y_relative(image, pct, fillcolor=(128, 128, 128)): + pixels = pct * image.size[0] + return image.transform(image.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), fillcolor=fillcolor) -def rotate(image, magnitude): - rot = image.convert("RGBA").rotate(magnitude) - return Image.composite(rot, - Image.new('RGBA', rot.size, (128,) * 4), - rot).convert(image.mode) +def rotate(image, degrees): + return image.rotate(degrees) def auto_contrast(image, magnitude=None): @@ -256,30 +467,43 @@ def equalize(image, magnitude=None): return ImageOps.equalize(image) -def solarize(image, magnitude): - return ImageOps.solarize(image, magnitude) +def solarize(image, thresh): + return ImageOps.solarize(image, thresh) + +def solarize_add(image, add, thresh=128): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if image.mode in ("L", "RGB"): + if image.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return image.point(lut) + else: + return image -def posterize(image, magnitude): - return ImageOps.posterize(image, magnitude) + +def posterize(image, bits_to_keep): + if bits_to_keep >= 8: + return image + return ImageOps.posterize(image, bits_to_keep) -def contrast(image, magnitude): - magnitude = magnitude * random.choice([-1, 1]) # random negative - return ImageEnhance.Contrast(image).enhance(1 + magnitude) +def contrast(image, factor): + return ImageEnhance.Contrast(image).enhance(factor) -def color(image, magnitude): - magnitude = magnitude * random.choice([-1, 1]) # random negative - return ImageEnhance.Color(image).enhance(1 + magnitude) +def color(image, factor): + return ImageEnhance.Color(image).enhance(factor) -def brightness(image, magnitude): - magnitude = magnitude * random.choice([-1, 1]) # random negative - return ImageEnhance.Brightness(image).enhance(1 + magnitude) +def brightness(image, factor): + return ImageEnhance.Brightness(image).enhance(factor) -def sharpness(image, magnitude): - magnitude = magnitude * random.choice([-1, 1]) # random negative - return ImageEnhance.Sharpness(image).enhance(1 + magnitude) +def sharpness(image, factor): + return ImageEnhance.Sharpness(image).enhance(factor) diff --git a/image_classification/MAE/config.py b/image_classification/MAE/config.py index 21b64308..b0ad64e2 100644 --- a/image_classification/MAE/config.py +++ b/image_classification/MAE/config.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
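For reference, a minimal usage sketch of the refactored RandAugment pieces above, mirroring how datasets.py builds the finetune transform. This is illustrative only: it assumes the patched augment.py is importable, that RandAugment keeps the same `__call__(image)` interface as the existing AutoAugment class, and the image path is a hypothetical placeholder.

```python
# Minimal sketch: apply the timm-style increasing RandAugment policy to one PIL image.
from PIL import Image
from augment import RandAugment, rand_augment_policy_increasing

# rand-m9-mstd0.5-inc1 style settings; each op fires with prob=0.5 by default
policy = rand_augment_policy_increasing(magnitude_idx=9, magnitude_std=0.5)
rand_augment = RandAugment(policy=policy, num_layers=2)  # choose 2 ops at random per image

image = Image.open('sample.jpg').convert('RGB')  # hypothetical input image
augmented = rand_augment(image)
```

Here `num_layers` corresponds to TRAIN.RAND_AUGMENT_LAYERS and `magnitude_idx` to TRAIN.RAND_AUGMENT_MAGNITUDE in the finetune configs.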
@@ -28,28 +28,27 @@ # data settings _C.DATA = CN() -_C.DATA.BATCH_SIZE = 256 # 256 # train batch_size for single GPU -_C.DATA.BATCH_SIZE_EVAL = 8 # 64 # val batch_size for single GPU +_C.DATA.BATCH_SIZE = 256 # train batch_size on single GPU +_C.DATA.BATCH_SIZE_EVAL = 256 # (disabled in update_config) val batch_size on single GPU _C.DATA.DATA_PATH = '/dataset/imagenet/' # path to dataset -_C.DATA.DATASET = 'imagenet2012' # dataset name -_C.DATA.IMAGE_SIZE = 224 # input image size: 224 for pretrain, 384 for finetune -# input image scale ratio, scale is applied before centercrop in eval mode -_C.DATA.CROP_PCT = 0.875 -_C.DATA.NUM_WORKERS = 4 # number of data loading threads -_C.DATA.IMAGENET_MEAN = [0.485, 0.456, 0.406] # [0.5, 0.5, 0.5] -_C.DATA.IMAGENET_STD = [0.229, 0.224, 0.225] # [0.5, 0.5, 0.5] +_C.DATA.DATASET = 'imagenet2012' # dataset name, currently only support imagenet2012 +_C.DATA.IMAGE_SIZE = 224 # input image size: 224 for pretrain +_C.DATA.CROP_PCT = 0.875 # input image scale ratio, scale is applied before centercrop in eval mode +_C.DATA.NUM_WORKERS = 1 # number of data loading threads +_C.DATA.IMAGENET_MEAN = [0.485, 0.456, 0.406] # [0.5, 0.5, 0.5] # imagenet mean values +_C.DATA.IMAGENET_STD = [0.229, 0.224, 0.225] # [0.5, 0.5, 0.5] # imagenet std values # model settings _C.MODEL = CN() -_C.MODEL.TYPE = 'PRETRAIN' # [PRETRAIN, FINETUNE, LINEARPROBE] +_C.MODEL.TYPE = 'PRETRAIN' # [PRETRAIN, FINETUNE, LINEARPROBE] # used to fetch data augmentation _C.MODEL.NAME = 'MAE' _C.MODEL.RESUME = None _C.MODEL.PRETRAINED = None _C.MODEL.NUM_CLASSES = 1000 -_C.MODEL.DROPOUT = 0.1 -_C.MODEL.DROPPATH = 0.1 -_C.MODEL.ATTENTION_DROPOUT = 0.1 -_C.MODEL.GLOBAL_POOL = False # use for finetune only +_C.MODEL.DROPOUT = 0.0 +_C.MODEL.DROPPATH = 0.0 +_C.MODEL.ATTENTION_DROPOUT = 0.0 +_C.MODEL.GLOBAL_POOL = False # Pretrain: N/A, Finetune: True, Linearprobe: False # transformer settings _C.MODEL.TRANS = CN() @@ -57,7 +56,7 @@ _C.MODEL.TRANS.MLP_RATIO = 4.0 _C.MODEL.TRANS.QKV_BIAS = True _C.MODEL.TRANS.MASK_RATIO = 0.75 -_C.MODEL.TRANS.NORM_PIX_LOSS = True +_C.MODEL.TRANS.NORM_PIX_LOSS = True # effective only for Pretrain _C.MODEL.TRANS.ENCODER = CN() _C.MODEL.TRANS.ENCODER.DEPTH = 12 _C.MODEL.TRANS.ENCODER.EMBED_DIM = 768 @@ -65,35 +64,36 @@ _C.MODEL.TRANS.DECODER = CN() _C.MODEL.TRANS.DECODER.DEPTH = 8 _C.MODEL.TRANS.DECODER.EMBED_DIM = 512 -_C.MODEL.TRANS.DECODER.NUM_HEADS = 8 +_C.MODEL.TRANS.DECODER.NUM_HEADS = 16 # training settings (for Vit-L/16 pretrain) _C.TRAIN = CN() _C.TRAIN.LAST_EPOCH = 0 _C.TRAIN.NUM_EPOCHS = 800 -_C.TRAIN.WARMUP_EPOCHS = 40 # 34 # ~ 10k steps for 4096 batch size -_C.TRAIN.WEIGHT_DECAY = 0.05 # 0.3 # 0.0 for finetune -_C.TRAIN.BASE_LR = 1.5e-4 # 0.003 for pretrain # 0.03 for finetune -_C.TRAIN.WARMUP_START_LR = 1e-6 # 0.0 -_C.TRAIN.END_LR = 5e-4 +_C.TRAIN.WARMUP_EPOCHS = 40 +_C.TRAIN.WEIGHT_DECAY = 0.05 +_C.TRAIN.BASE_LR = 1.5e-4 +_C.TRAIN.WARMUP_START_LR = 1e-6 # 0.0 # not used in MAE +_C.TRAIN.END_LR = 0.0 # 1e-6 _C.TRAIN.GRAD_CLIP = None _C.TRAIN.ACCUM_ITER = 1 -_C.TRAIN.LINEAR_SCALED_LR = None +_C.TRAIN.LINEAR_SCALED_LR = 256 _C.TRAIN.LAYER_DECAY = None # used for finetuning only # train augmentation (only for finetune) _C.TRAIN.SMOOTHING = 0.1 _C.TRAIN.COLOR_JITTER = 0.4 +_C.TRAIN.AUTO_AUGMENT = False _C.TRAIN.RAND_AUGMENT = True -_C.TRAIN.RAND_AUGMENT_LAYERS = 9 -_C.TRAIN.RAND_AUGMENT_MAGNITUDE = 5 # scale from 0 to 10 +_C.TRAIN.RAND_AUGMENT_LAYERS = 2 +_C.TRAIN.RAND_AUGMENT_MAGNITUDE = 9 # scale from 0 to 9 # mixup params -_C.TRAIN.MIXUP_ALPHA = 0.0 
+_C.TRAIN.MIXUP_ALPHA = 0.8 _C.TRAIN.MIXUP_PROB = 1.0 _C.TRAIN.MIXUP_SWITCH_PROB = 0.5 _C.TRAIN.MIXUP_MODE = 'batch' -_C.TRAIN.CUTMIX_ALPHA = 0.0 +_C.TRAIN.CUTMIX_ALPHA = 1.0 _C.TRAIN.CUTMIX_MINMAX = None # random erase parameters _C.TRAIN.RANDOM_ERASE_PROB = 0.25 @@ -119,13 +119,13 @@ _C.SAVE = "./output" _C.TAG = "default" _C.SAVE_FREQ = 10 # freq to save chpt -_C.REPORT_FREQ = 100 # freq to logging info -_C.VALIDATE_FREQ = 10 # freq to do validation +_C.REPORT_FREQ = 20 # freq to logging info +_C.VALIDATE_FREQ = 1 # freq to do validation _C.SEED = 0 _C.EVAL = False # run evaluation only _C.AMP = False # mix precision training _C.LOCAL_RANK = 0 -_C.NGPUS = -1 +_C.NGPUS = -1 # not used in MAE fleet launch def _update_config_from_file(config, cfg_file): diff --git a/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml b/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml index 106ddd1e..54eb672b 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml @@ -19,18 +19,17 @@ TRAIN: WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 BASE_LR: 5e-4 - WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 END_LR: 1e-6 ACCUM_ITER: 1 OPTIMIZER: - NAME: 'AdamW' + NAME: 'AdamWDL' BETAS: (0.9, 0.999) LAYER_DECAY: 0.65 SMOOTHING: 0.1 RAND_AUGMENT: True - RAND_AUGMENT_LAYERS: 9 - RAND_AUGMENT_MAGNITUDE: 5 + RAND_AUGMENT_LAYERS: 2 + RAND_AUGMENT_MAGNITUDE: 9 MIXUP_ALPHA: 0.8 MIXUP_PROB: 1.0 MIXUP_SWITCH_PROB: 0.5 @@ -42,3 +41,7 @@ TRAIN: RANDOM_ERASE_COUNT: 1 RANDOM_ERASE_SPLIT: False +VALIDATE_FREQ: 1 +SAVE_FREQ: 10 +REPORT_FREQ: 100 + diff --git a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml index 3a357b37..ae80de13 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml @@ -20,17 +20,16 @@ TRAIN: WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 BASE_LR: 5e-4 - WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 END_LR: 1e-6 OPTIMIZER: - NAME: 'AdamW' + NAME: 'AdamWDL' BETAS: (0.9, 0.999) LAYER_DECAY: 0.65 SMOOTHING: 0.1 RAND_AUGMENT: True - RAND_AUGMENT_LAYERS: 9 - RAND_AUGMENT_MAGNITUDE: 5 + RAND_AUGMENT_LAYERS: 2 + RAND_AUGMENT_MAGNITUDE: 9 MIXUP_ALPHA: 0.8 MIXUP_PROB: 1.0 MIXUP_SWITCH_PROB: 0.5 @@ -42,3 +41,7 @@ TRAIN: RANDOM_ERASE_COUNT: 1 RANDOM_ERASE_SPLIT: False +VALIDATE_FREQ: 1 +SAVE_FREQ: 10 +REPORT_FREQ: 20 + diff --git a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml b/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml index 3620ae6f..a3b66148 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml @@ -4,7 +4,6 @@ DATA: MODEL: TYPE: LINEARPROBE NAME: vit_base_patch16_224 - DROPPATH: 0.1 GLOBAL_POOL: False # enable cls_token TRANS: PATCH_SIZE: 16 @@ -19,26 +18,8 @@ TRAIN: WARMUP_EPOCHS: 10 WEIGHT_DECAY: 0.0 BASE_LR: 0.1 - WARMUP_START_LR: 0.0 LINEAR_SCALED_LR: 256 END_LR: 0.0 ACCUM_ITER: 1 OPTIMIZER: - NAME: 'AdamW' - BETAS: (0.9, 0.999) - #LAYER_DECAY: 0.75 - #SMOOTHING: 0.1 - #RAND_AUGMENT: True - #RAND_AUGMENT_LAYERS: 9 - #RAND_AUGMENT_MAGNITUDE: 5 - #MIXUP_ALPHA: 0.0 - #MIXUP_PROB: 1.0 - #MIXUP_SWITCH_PROB: 0.5 - #MIXUP_MODE: 'batch' - #CUTMIX_ALPHA: 0.0 - #CUTMIX_MINMAX: None - #RANDOM_ERASE_PROB: 0.25 - #RANDOM_ERASE_MODE: 
'pixel' - #RANDOM_ERASE_COUNT: 1 - #RANDOM_ERASE_SPLIT: False - + NAME: 'LARS' diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml b/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml index 2badb0a3..205be30a 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml @@ -18,12 +18,13 @@ MODEL: EMBED_DIM: 512 DEPTH: 8 NUM_HEADS: 16 + NORM_PIX_LOSS: True TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 - WARMUP_START_LR: 1e-7 + END_LR: 0.0 LINEAR_SCALED_LR: 256 GRAD_CLIP: None ACCUM_ITER: 1 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml b/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml deleted file mode 100644 index 43f1fa73..00000000 --- a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml +++ /dev/null @@ -1,36 +0,0 @@ -DATA: - IMAGE_SIZE: 224 - CROP_PCT: 0.875 -MODEL: - TYPE: PRETRAIN - NAME: vit_base_patch16_224_dec1 - DROPPATH: 0.0 - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - MASK_RATIO: 0.75 - ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 - DECODER: - EMBED_DIM: 512 - DEPTH: 1 - NUM_HEADS: 8 -TRAIN: - NUM_EPOCHS: 800 - WARMUP_EPOCHS: 40 - WEIGHT_DECAY: 0.05 - BASE_LR: 1.5e-4 - WARMUP_START_LR: 1e-6 - GRAD_CLIP: 1 - ACCUM_ITER: 1 # the total batch size should be 4096 - LINEAR_SCALED_LR: None - - LR_SCHEDULER: - NAME: 'warmupcosine' - - OPTIMIZER: - NAME: 'AdamW' - BETAS: (0.9, 0.95) diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml index 0ddf9d4b..4b47a06a 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml +++ b/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml @@ -18,19 +18,18 @@ TRAIN: NUM_EPOCHS: 50 WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 - BASE_LR: 1e-3 - WARMUP_START_LR: 1e-7 + BASE_LR: 1e-3 # absolute_lr = base_lr * total_batch_size / 256 LINEAR_SCALED_LR: 256 END_LR: 1e-6 ACCUM_ITER: 1 OPTIMIZER: - NAME: 'AdamW' + NAME: 'AdamWDL' BETAS: (0.9, 0.999) - LAYER_DECAY: 0.75 + LAYER_DECAY: 0.75 # same as MAE official readme SMOOTHING: 0.1 RAND_AUGMENT: True - RAND_AUGMENT_LAYERS: 9 - RAND_AUGMENT_MAGNITUDE: 5 + RAND_AUGMENT_LAYERS: 2 + RAND_AUGMENT_MAGNITUDE: 9 MIXUP_ALPHA: 0.8 MIXUP_PROB: 1.0 MIXUP_SWITCH_PROB: 0.5 @@ -42,3 +41,7 @@ TRAIN: RANDOM_ERASE_COUNT: 1 RANDOM_ERASE_SPLIT: False +VALIDATE_FREQ: 1 +SAVE_FREQ: 10 +REPORT_FREQ: 100 + diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml index b47763e7..83e70b23 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml +++ b/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml @@ -4,7 +4,6 @@ DATA: MODEL: TYPE: LINEARPROBE NAME: vit_huge_patch14_224 - DROPPATH: 0.1 GLOBAL_POOL: False TRANS: PATCH_SIZE: 14 @@ -19,26 +18,8 @@ TRAIN: WARMUP_EPOCHS: 10 WEIGHT_DECAY: 0.0 BASE_LR: 0.1 - WARMUP_START_LR: 0.0 LINEAR_SCALED_LR: 256 END_LR: 0.0 ACCUM_ITER: 1 OPTIMIZER: - NAME: 'AdamW' - BETAS: (0.9, 0.999) - #LAYER_DECAY: 0.75 - #SMOOTHING: 0.1 - #RAND_AUGMENT: True - #RAND_AUGMENT_LAYERS: 9 - #RAND_AUGMENT_MAGNITUDE: 5 - #MIXUP_ALPHA: 0.8 - #MIXUP_PROB: 1.0 - #MIXUP_SWITCH_PROB: 0.5 - #MIXUP_MODE: 'batch' - #CUTMIX_ALPHA: 1.0 - #CUTMIX_MINMAX: None - #RANDOM_ERASE_PROB: 0.25 - 
#RANDOM_ERASE_MODE: 'pixel' - #RANDOM_ERASE_COUNT: 1 - #RANDOM_ERASE_SPLIT: False - + NAME: 'LARS' diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml b/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml index f791594d..485e9c76 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml +++ b/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml @@ -18,12 +18,13 @@ MODEL: EMBED_DIM: 512 DEPTH: 8 NUM_HEADS: 16 + NORM_PIX_LOSS: True TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 - WARMUP_START_LR: 1e-7 + END_LR: 0.0 LINEAR_SCALED_LR: 256 GRAD_CLIP: None ACCUM_ITER: 1 diff --git a/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml b/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml index e2a86bac..210f305c 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml +++ b/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml @@ -19,18 +19,17 @@ TRAIN: WARMUP_EPOCHS: 5 WEIGHT_DECAY: 0.05 BASE_LR: 1e-3 # absolute_lr = base_lr * total_batch_size / 256 - WARMUP_START_LR: 1e-7 LINEAR_SCALED_LR: 256 END_LR: 1e-6 ACCUM_ITER: 1 OPTIMIZER: - NAME: 'AdamW' + NAME: 'AdamWDL' BETAS: (0.9, 0.999) LAYER_DECAY: 0.75 # same as MAE official readme SMOOTHING: 0.1 RAND_AUGMENT: True - RAND_AUGMENT_LAYERS: 9 - RAND_AUGMENT_MAGNITUDE: 5 + RAND_AUGMENT_LAYERS: 2 + RAND_AUGMENT_MAGNITUDE: 9 MIXUP_ALPHA: 0.8 MIXUP_PROB: 1.0 MIXUP_SWITCH_PROB: 0.5 @@ -42,3 +41,7 @@ TRAIN: RANDOM_ERASE_COUNT: 1 RANDOM_ERASE_SPLIT: False +VALIDATE_FREQ: 1 +SAVE_FREQ: 10 +REPORT_FREQ: 100 + diff --git a/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml b/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml index e91bc21d..b01f3be6 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml +++ b/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml @@ -4,8 +4,7 @@ DATA: MODEL: TYPE: LINEARPROBE NAME: vit_large_patch16_224 - DROPPATH: 0.1 - GLOBAL_POOL: False + GLOBAL_POOL: False # enable cls_token TRANS: PATCH_SIZE: 16 MLP_RATIO: 4.0 @@ -19,26 +18,8 @@ TRAIN: WARMUP_EPOCHS: 10 WEIGHT_DECAY: 0.0 BASE_LR: 0.1 - WARMUP_START_LR: 0.0 LINEAR_SCALED_LR: 256 END_LR: 0.0 ACCUM_ITER: 1 OPTIMIZER: - NAME: 'AdamW' - BETAS: (0.9, 0.999) - #LAYER_DECAY: 0.75 - #SMOOTHING: 0.1 - #RAND_AUGMENT: True - #RAND_AUGMENT_LAYERS: 9 - #RAND_AUGMENT_MAGNITUDE: 5 - #MIXUP_ALPHA: 0.8 - #MIXUP_PROB: 1.0 - #MIXUP_SWITCH_PROB: 0.5 - #MIXUP_MODE: 'batch' - #CUTMIX_ALPHA: 1.0 - #CUTMIX_MINMAX: None - #RANDOM_ERASE_PROB: 0.25 - #RANDOM_ERASE_MODE: 'pixel' - #RANDOM_ERASE_COUNT: 1 - #RANDOM_ERASE_SPLIT: False - + NAME: 'LARS' diff --git a/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml b/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml index a90c4aa6..42ec4508 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml +++ b/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml @@ -10,7 +10,6 @@ MODEL: MLP_RATIO: 4.0 QKV_BIAS: true MASK_RATIO: 0.75 - NORM_PIX_LOSS: True ENCODER: EMBED_DIM: 1024 DEPTH: 24 @@ -19,12 +18,13 @@ MODEL: EMBED_DIM: 512 DEPTH: 8 NUM_HEADS: 16 + NORM_PIX_LOSS: True TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 WEIGHT_DECAY: 0.05 BASE_LR: 1.5e-4 - WARMUP_START_LR: 1e-7 + END_LR: 0.0 LINEAR_SCALED_LR: 256 GRAD_CLIP: None ACCUM_ITER: 1 diff --git a/image_classification/MAE/datasets.py 
b/image_classification/MAE/datasets.py index c90330d8..97414b06 100644 --- a/image_classification/MAE/datasets.py +++ b/image_classification/MAE/datasets.py @@ -28,7 +28,7 @@ from paddle.vision import image_load from augment import auto_augment_policy_original from augment import AutoAugment -from augment import rand_augment_policy_original +from augment import rand_augment_policy_increasing from augment import RandAugment from random_erasing import RandomErasing @@ -37,6 +37,9 @@ class ImageNet2012Dataset(Dataset): """Build ImageNet2012 dataset This class gets train/val imagenet datasets, which loads transfomed data and labels. + Note: + train_list.txt and val_list.txt is required. + Please refer https://github.com/BR-IDL/PaddleViT/tree/mae_refactor/image_classification#data-preparation Attributes: file_folder: path where imagenet images are stored @@ -93,6 +96,7 @@ def get_train_transforms_pretrain(config): def get_train_transforms_linearprobe(config): """Weak augmentation for linear probing""" aug_op_list = [transforms.RandomResizedCrop(size=(config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE), + scale=(0.08, 1.0), interpolation='bicubic'), # same as MAE pytorch transforms.RandomHorizontalFlip(), transforms.ToTensor(), @@ -107,13 +111,15 @@ def get_train_transforms_finetune(config): # STEP1: random crop and resize aug_op_list.append( transforms.RandomResizedCrop((config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE), - scale=(0.2, 1.0), interpolation='bicubic'))# Same as MAE pytorch + scale=(0.08, 1.0), interpolation='bicubic'))# Same as MAE pytorch # STEP2: random horizontalflip aug_op_list.append(transforms.RandomHorizontalFlip()) # STEP3: rand_augment or auto_augment or color jitter if config.TRAIN.RAND_AUGMENT: # MAE: True - policy = rand_augment_policy_original() - rand_augment = RandAugment(policy) + policy = rand_augment_policy_increasing( + magnitude_idx=config.TRAIN.RAND_AUGMENT_MAGNITUDE) + rand_augment = RandAugment( + policy=policy, num_layers=config.TRAIN.RAND_AUGMENT_LAYERS) aug_op_list.append(rand_augment) elif config.TRAIN.AUTO_AUGMENT: # MAE: None policy = auto_augment_policy_original() @@ -159,9 +165,12 @@ def get_train_transforms(config): elif config.MODEL.TYPE == "LINEARPROBE": transforms_train = get_train_transforms_linearprobe else: - raise ValueError('config.MODEL.TYPE not supported!') + raise ValueError(f'{config.MODEL.TYPE} not supported!') + + transforms = transforms_train(config) + print(transforms) - return transforms_train(config) + return transforms # val transform is for MAE finetune and line probing @@ -198,28 +207,13 @@ def get_dataset(config, mode='train'): Returns: dataset: dataset object """ - assert mode in ['train', 'val'] - if config.DATA.DATASET == "cifar10": - if mode == 'train': - dataset = datasets.Cifar10(mode=mode, transform=get_train_transforms(config)) - else: - mode = 'test' - dataset = datasets.Cifar10(mode=mode, transform=get_val_transforms(config)) - elif config.DATA.DATASET == "cifar100": - if mode == 'train': - dataset = datasets.Cifar100(mode=mode, transform=get_train_transforms(config)) - else: - mode = 'test' - dataset = datasets.Cifar100(mode=mode, transform=get_val_transforms(config)) - elif config.DATA.DATASET == "imagenet2012": - if mode == 'train': - dataset = ImageNet2012Dataset(config.DATA.DATA_PATH, - mode=mode, - transform=get_train_transforms(config)) - else: - dataset = ImageNet2012Dataset(config.DATA.DATA_PATH, - mode=mode, - transform=get_val_transforms(config)) + assert mode in ['train', 'val', 'test'] + # both val and test use 
get_val_transforms + if config.DATA.DATASET == "imagenet2012": + transform = get_train_transforms(config) if mode == 'train' else get_val_transforms(config) + dataset = ImageNet2012Dataset(config.DATA.DATA_PATH, + mode=mode, + transform=transform) else: raise NotImplementedError( "[{config.DATA.DATASET}] Only cifar10, cifar100, imagenet2012 are supported now") @@ -240,10 +234,7 @@ def get_dataloader(config, dataset, mode='train', multi_process=False): dataloader: paddle.io.DataLoader object. """ - if mode == 'train': - batch_size = config.DATA.BATCH_SIZE - else: - batch_size = config.DATA.BATCH_SIZE_EVAL + batch_size = config.DATA.BATCH_SIZE if mode == 'train' else config.DATA.BATCH_SIZE_EVAL if multi_process is True: sampler = DistributedBatchSampler(dataset, diff --git a/image_classification/MAE/load_pytorch_weights.py b/image_classification/MAE/load_pytorch_weights.py new file mode 100644 index 00000000..04d47874 --- /dev/null +++ b/image_classification/MAE/load_pytorch_weights.py @@ -0,0 +1,215 @@ +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import numpy as np +import paddle +import torch +import timm +from mae_pytorch import models_mae, models_vit +from transformer import build_mae_pretrain as build_model +#from transformer import build_transformer as build_model +from config import * +import random + +seed = 0 +torch.manual_seed(seed) +paddle.seed(seed) +np.random.seed(seed) +random.seed(seed) + +#model_name = 'mae_vit_huge_patch14' +#config = get_config(f'./configs/vit_huge_patch14_224_pretrain.yaml') + +#model_name = 'mae_vit_large_patch16' +#config = get_config(f'./configs/vit_large_patch16_224_pretrain.yaml') + +model_name = 'mae_vit_base_patch16' +config = get_config(f'./configs/vit_base_patch16_224_pretrain.yaml') + +def print_model_named_params(model): + print('----------------------------------') + for name, param in model.named_parameters(): + print(name, param.shape) + print('----------------------------------') + + +def print_model_named_buffers(model): + print('----------------------------------') + for name, param in model.named_buffers(): + print(name, param.shape) + print('----------------------------------') + + +def torch_to_paddle_mapping(): + mapping = [ + ('cls_token', f'cls_token'), + ('mask_token', f'mask_token'), + ('pos_embed', f'encoder_position_embedding'), + ('patch_embed.proj', f'patch_embedding.patch_embedding'), + ('norm', 'encoder.norm'), + ('decoder_embed', f'linear_projection'), + ('decoder_pos_embed', f'decoder_position_embedding'), + ('decoder_norm', f'decoder.norm'), + ('decoder_pred', f'decoder_pred'), + + ] + + if 'large' in model_name: + num_enc_layers = 24 + num_dec_layers = 8 + elif 'base' in model_name: + num_enc_layers = 12 + num_dec_layers = 8 + elif 'huge' in model_name: + num_enc_layers = 32 + num_dec_layers = 8 + else: + raise ValueError('now only support large and base model conversion') + + for idx in range(num_enc_layers): + pp_prefix = 
f'encoder.layers.{idx}' + th_prefix = f'blocks.{idx}' + layer_mapping = [ + (f'{th_prefix}.norm1', f'{pp_prefix}.attn_norm'), + (f'{th_prefix}.norm2', f'{pp_prefix}.mlp_norm'), + (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), + (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), + (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), + (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.out'), + ] + mapping.extend(layer_mapping) + + for idx in range(num_dec_layers): + pp_prefix = f'decoder.layers.{idx}' + th_prefix = f'decoder_blocks.{idx}' + layer_mapping = [ + (f'{th_prefix}.norm1', f'{pp_prefix}.attn_norm'), + (f'{th_prefix}.norm2', f'{pp_prefix}.mlp_norm'), + (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), + (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), + (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), + (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.out'), + ] + mapping.extend(layer_mapping) + + #head_mapping = [ + # #('head', 'classifier') + #] + #mapping.extend(head_mapping) + + return mapping + + +def convert(torch_model, paddle_model): + def _set_value(th_name, pd_name, transpose=True): + th_shape = th_params[th_name].shape + pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list + #assert th_shape == pd_shape, f'{th_shape} != {pd_shape}' + print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}') + if isinstance(th_params[th_name], torch.nn.parameter.Parameter): + value = th_params[th_name].data.numpy() + else: + value = th_params[th_name].numpy() + + if len(value.shape) == 2 and transpose: + value = value.transpose((1, 0)) + pd_params[pd_name].set_value(value) + + # 1. get paddle and torch model parameters + pd_params = {} + th_params = {} + for name, param in paddle_model.named_parameters(): + pd_params[name] = param + for name, param in torch_model.named_parameters(): + th_params[name] = param + + for name, param in paddle_model.named_buffers(): + pd_params[name] = param + for name, param in torch_model.named_buffers(): + th_params[name] = param + + # 2. get name mapping pairs + mapping = torch_to_paddle_mapping() + + # 3. 
set torch param values to paddle params: may needs transpose on weights + for th_name, pd_name in mapping: + if th_name in th_params.keys(): # nn.Parameters + _set_value(th_name, pd_name) + else: # weight & bias + th_name_w = f'{th_name}.weight' + pd_name_w = f'{pd_name}.weight' + _set_value(th_name_w, pd_name_w) + + if f'{th_name}.bias' in th_params.keys(): + th_name_b = f'{th_name}.bias' + pd_name_b = f'{pd_name}.bias' + _set_value(th_name_b, pd_name_b) + + return paddle_model + + +def main(): + + paddle.set_device('cpu') + paddle_model = build_model(config) + paddle_model.eval() + print_model_named_params(paddle_model) + print_model_named_buffers(paddle_model) + + print('+++++++++++++++++++++++++++++++++++') + device = torch.device('cpu') + #torch_model = models_vit.__dict__[model_name](global_pool=True) + torch_model = models_mae.__dict__[model_name](norm_pix_loss=True) + print_model_named_params(torch_model) + print_model_named_buffers(torch_model) + state_dict = torch.load('./mae_pretrain_vit_base.pth', map_location='cpu')['model'] + print('===========================') + for key in state_dict: + print(key) + print('===========================') + torch_model.load_state_dict(state_dict, strict=False) + torch_model = torch_model.to(device) + torch_model.eval() + + # convert weights + paddle_model = convert(torch_model, paddle_model) + + # check correctness + x = np.random.randn(2, 3, 224, 224).astype('float32') + x_paddle = paddle.to_tensor(x) + x_torch = torch.Tensor(x).to(device) + + #out_torch = torch_model(x_torch)[1] + #out_paddle = paddle_model(x_paddle)[1] + out_torch = torch_model.forward_encoder(x_torch, 0.0)[0] + out_paddle = paddle_model.forward_encoder(x_paddle, 0.0)[0] + + out_torch = out_torch.data.cpu().numpy() + out_paddle = out_paddle.cpu().numpy() + + print(out_torch.shape, out_paddle.shape) + print(out_torch[0, 0:100]) + print('========================================================') + print(out_paddle[0, 0:100]) + assert np.allclose(out_torch, out_paddle, atol = 1e-5) + + ## save weights for paddle model + #model_path = os.path.join(f'./{model_name}.pdparams') + #paddle.save(paddle_model.state_dict(), model_path) + #print('all done') + + +if __name__ == "__main__": + main() diff --git a/image_classification/MAE/load_pytorch_weights_finetune.py b/image_classification/MAE/load_pytorch_weights_finetune.py new file mode 100644 index 00000000..e1acab62 --- /dev/null +++ b/image_classification/MAE/load_pytorch_weights_finetune.py @@ -0,0 +1,188 @@ +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
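The convert() helpers in the weight-porting scripts here transpose every 2-D tensor before calling set_value(); this follows from the different Linear weight layouts of the two frameworks. A small sketch of that convention (shapes only, not tied to any MAE checkpoint):

```python
# torch.nn.Linear stores weight as (out_features, in_features);
# paddle.nn.Linear stores it as (in_features, out_features),
# hence the transpose((1, 0)) applied to 2-D tensors in _set_value().
import paddle
import torch

th_fc = torch.nn.Linear(768, 512)
pd_fc = paddle.nn.Linear(768, 512)
print(tuple(th_fc.weight.shape))  # (512, 768)
print(tuple(pd_fc.weight.shape))  # (768, 512)

# Copy the torch weight into the paddle layer by transposing the 2-D weight.
pd_fc.weight.set_value(th_fc.weight.detach().numpy().transpose((1, 0)))
pd_fc.bias.set_value(th_fc.bias.detach().numpy())
```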
+ +import argparse +import numpy as np +import paddle +import torch +import timm +from mae_pytorch import models_mae, models_vit +from transformer import build_transformer as build_model +from config import * + +## vit-base +#model_path='./mae_finetuned_vit_base' +#model_name = 'vit_base_patch16' +#config = get_config(f'./configs/vit_base_patch16_224_finetune.yaml') + +# vit-large +#model_path='./mae_finetuned_vit_large' +#model_name = 'vit_large_patch16' +#config = get_config(f'./configs/vit_large_patch16_224_finetune.yaml') + +# vit-huge +model_path='./mae_finetuned_vit_huge' +model_name = 'vit_huge_patch14' +config = get_config(f'./configs/vit_huge_patch14_224_finetune.yaml') + + +def print_model_named_params(model): + print('----------------------------------') + for name, param in model.named_parameters(): + print(name, param.shape) + print('----------------------------------') + + +def print_model_named_buffers(model): + print('----------------------------------') + for name, param in model.named_buffers(): + print(name, param.shape) + print('----------------------------------') + + +def torch_to_paddle_mapping(): + mapping = [ + ('cls_token', f'cls_token'), + ('pos_embed', f'encoder_position_embedding'), + ('patch_embed.proj', f'patch_embedding.patch_embedding'), + ] + + if 'large' in model_name: + num_layers = 24 + elif 'base' in model_name: + num_layers = 12 + elif 'huge' in model_name: + num_layers = 32 + else: + raise ValueError('now only support large and base model conversion') + + for idx in range(num_layers): + pp_prefix = f'encoder.layers.{idx}' + th_prefix = f'blocks.{idx}' + layer_mapping = [ + (f'{th_prefix}.norm1', f'{pp_prefix}.attn_norm'), + (f'{th_prefix}.norm2', f'{pp_prefix}.mlp_norm'), + (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), + (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), + (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), + (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.out'), + ] + mapping.extend(layer_mapping) + + head_mapping = [ + #('norm', 'encoder_norm'), + ('fc_norm', 'encoder_norm'), + ('head', 'classifier') + ] + mapping.extend(head_mapping) + + return mapping + + + +def convert(torch_model, paddle_model): + def _set_value(th_name, pd_name, transpose=True): + th_shape = th_params[th_name].shape + pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list + #assert th_shape == pd_shape, f'{th_shape} != {pd_shape}' + print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}') + if isinstance(th_params[th_name], torch.nn.parameter.Parameter): + value = th_params[th_name].data.numpy() + else: + value = th_params[th_name].numpy() + + if len(value.shape) == 2 and transpose: + value = value.transpose((1, 0)) + pd_params[pd_name].set_value(value) + + # 1. get paddle and torch model parameters + pd_params = {} + th_params = {} + for name, param in paddle_model.named_parameters(): + pd_params[name] = param + for name, param in torch_model.named_parameters(): + th_params[name] = param + + for name, param in paddle_model.named_buffers(): + pd_params[name] = param + for name, param in torch_model.named_buffers(): + th_params[name] = param + + # 2. get name mapping pairs + mapping = torch_to_paddle_mapping() + + # 3. 
set torch param values to paddle params: may need transpose on weights + for th_name, pd_name in mapping: + if th_name in th_params.keys(): # nn.Parameters + _set_value(th_name, pd_name) + else: # weight & bias + th_name_w = f'{th_name}.weight' + pd_name_w = f'{pd_name}.weight' + _set_value(th_name_w, pd_name_w) + + if f'{th_name}.bias' in th_params.keys(): + th_name_b = f'{th_name}.bias' + pd_name_b = f'{pd_name}.bias' + _set_value(th_name_b, pd_name_b) + + return paddle_model + + +def main(): + + paddle.set_device('cpu') + paddle_model = build_model(config) + paddle_model.eval() + print_model_named_params(paddle_model) + print_model_named_buffers(paddle_model) + + print('+++++++++++++++++++++++++++++++++++') + device = torch.device('cpu') + torch_model = models_vit.__dict__[model_name](global_pool=True) + print_model_named_params(torch_model) + print_model_named_buffers(torch_model) + state_dict = torch.load(f'{model_path}.pth', map_location='cpu')['model'] + torch_model.load_state_dict(state_dict, strict=False) + torch_model = torch_model.to(device) + torch_model.eval() + + #return + + # convert weights + paddle_model = convert(torch_model, paddle_model) + + # check correctness + x = np.random.randn(2, 3, 224, 224).astype('float32') + x_paddle = paddle.to_tensor(x) + x_torch = torch.Tensor(x).to(device) + + out_torch = torch_model(x_torch) + out_paddle = paddle_model(x_paddle) + + out_torch = out_torch.data.cpu().numpy() + out_paddle = out_paddle.cpu().numpy() + + print(out_torch.shape, out_paddle.shape) + print(out_torch[0, 0:100]) + print('========================================================') + print(out_paddle[0, 0:100]) + assert np.allclose(out_torch, out_paddle, atol = 1e-5) + + # save weights for paddle model + paddle.save(paddle_model.state_dict(), f'{model_path}.pdparams') + print('all done') + + +if __name__ == "__main__": + main() diff --git a/image_classification/MAE/lr_decay.py b/image_classification/MAE/lr_decay.py index 482eca45..2efe3592 100644 --- a/image_classification/MAE/lr_decay.py +++ b/image_classification/MAE/lr_decay.py @@ -14,48 +14,61 @@ """parameters groups for layer-wise lr decay, used in BeiT and MAE""" +import json + +# Note: param_groups_lrd is NOT used because the paddle Adam optimizer seems to have problems we have not identified yet; +# instead, we use paddlenlp.ops.optimizer.AdamWDL with lr_setting (see below) as a temporary fix. def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=[], layer_decay=0.75): + """layer-wise decay + set learning rate decay according to layer depth + Note: + 1. In Paddle param_groups, dict key 'learning_rate' is in fact the 'lr_mult' + 2. param_names in no_weight_decay_list will have no decay + 3. model.encoder.layers may need to change for models other than MAE_finetune + """ param_group_names = {} param_groups = {} num_layers = len(model.encoder.layers) + 1 layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1)) - for n, p in model.named_parameters(): - if p.stop_gradient is True: + for name, param in model.named_parameters(): + if param.stop_gradient is True: continue # no decay - if p.ndim == 1 or n in no_weight_decay_list: + if param.ndim == 1 or name.endswith('.bias') or name in no_weight_decay_list: g_decay = 'no_decay' - this_decay = 0. + this_weight_decay = 0.
else: g_decay = 'decay' - this_decay = weight_decay + this_weight_decay = weight_decay - layer_id = get_layer_id_for_vit(n, num_layers) + layer_id = get_layer_id_for_vit(name, num_layers) group_name = f"layer_{layer_id}_{g_decay}" if group_name not in param_group_names: this_scale = layer_scales[layer_id] param_group_names[group_name] = { - "learning_rate": this_scale, # TODO: check correctness - "weight_decay": this_decay, + "learning_rate": this_scale, + "weight_decay": this_weight_decay, "params": [], } param_groups[group_name] = { "learning_rate": this_scale, - "weight_decay": this_decay, + "weight_decay": this_weight_decay, "params": [], } - param_group_names[group_name]["params"].append(n) - param_groups[group_name]["params"].append(p) + param_group_names[group_name]["params"].append(name) + param_groups[group_name]["params"].append(param) + + print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2)) return list(param_groups.values()) def get_layer_id_for_vit(name, num_layers): """assign a parameter with its layer id""" - if name in ['cls_token', 'position_embedding']: + if name in ['cls_token', 'mask_token', 'encoder_position_embedding']: return 0 elif name.startswith('patch_embedding'): return 0 @@ -64,3 +77,10 @@ def get_layer_id_for_vit(name, num_layers): else: return num_layers + +def lr_setting(layer_decay, name_dict, num_layers, param): + layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1)) + static_name = name_dict[param.name] + #print('static_name= ', static_name, ', param.name= ', param.name) + layer_id = get_layer_id_for_vit(static_name, num_layers) + param.optimize_attr["learning_rate"] *= layer_scales[layer_id] diff --git a/image_classification/MAE/main_multi_gpu_finetune.py b/image_classification/MAE/main_multi_gpu_finetune.py index a08b9083..e2037b43 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/image_classification/MAE/main_multi_gpu_finetune.py @@ -20,11 +20,13 @@ import logging import argparse import random +import math import numpy as np import paddle import paddle.nn as nn import paddle.nn.functional as F import paddle.distributed as dist +from paddle.distributed import fleet from datasets import get_dataloader from datasets import get_dataset from mixup import Mixup @@ -36,10 +38,12 @@ from utils import get_exclude_from_weight_decay_fn from utils import get_params_groups from utils import cosine_scheduler +from utils import adjust_learning_rate from utils import interpolate_pos_embed import lr_decay from config import get_config from config import update_config +import paddlenlp def get_arguments(): @@ -61,7 +65,7 @@ def get_arguments(): return arguments -def get_logger(filename, logger_name=None): +def get_logger(file_path): """set logging file and format Args: filename: str, full path of the logger file to write @@ -69,15 +73,38 @@ def get_logger(filename, logger_name=None): Return: logger: python logger """ + local_rank = dist.get_rank() + filename = os.path.join(file_path, f'log_all.txt') log_format = "%(asctime)s %(message)s" - logging.basicConfig(stream=sys.stdout, level=logging.INFO, + logging.basicConfig(filename=filename, level=logging.INFO, format=log_format, datefmt="%m%d %I:%M:%S %p") - # different name is needed when creating multiple logger in one process - logger = logging.getLogger(logger_name) - fh = logging.FileHandler(os.path.join(filename)) + + # local_logger for each process/GPU + local_logger = logging.getLogger(f'local_{local_rank}') + filename = os.path.join(file_path, 
f'log_{local_rank}.txt') + fh = logging.FileHandler(filename) fh.setFormatter(logging.Formatter(log_format)) - logger.addHandler(fh) - return logger + local_logger.addHandler(fh) + ## console + #sh = logging.StreamHandler(sys.stdout) + #sh.setFormatter(logging.Formatter(log_format)) + #local_logger.addHandler(sh) + + # master_logger records avg performance + if local_rank == 0: + master_logger = logging.getLogger('master') + # log.txt + filename = os.path.join(file_path, f'log.txt') + fh = logging.FileHandler(filename) + fh.setFormatter(logging.Formatter(log_format)) + master_logger.addHandler(fh) + # console + sh = logging.StreamHandler(sys.stdout) + sh.setFormatter(logging.Formatter(log_format)) + master_logger.addHandler(sh) + else: + master_logger = None + return local_logger, master_logger def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): @@ -91,6 +118,7 @@ def write_log(local_logger, master_logger, msg_local, msg_master=None, level='in if master_logger and dist.get_rank() == 0: if msg_master is None: msg_master = msg_local + if level == 'info': master_logger.info("MASTER_LOG " + msg_master) elif level == 'fatal': @@ -99,12 +127,25 @@ def write_log(local_logger, master_logger, msg_local, msg_master=None, level='in raise ValueError("level must in ['info', 'fatal']") +def all_reduce_mean(x): + world_size = dist.get_world_size() + if world_size > 1: + x_reduce = paddle.to_tensor(x) + dist.all_reduce(x_reduce) + x_reduce = x_reduce / world_size + return x_reduce.item() + else: + return x + + def train(dataloader, model, optimizer, criterion, - lr_schedule, + base_lr, + min_lr, epoch, + warmup_epochs, total_epochs, total_batch, debug_steps=100, @@ -119,7 +160,6 @@ def train(dataloader, model: nn.Layer, a ViT model optimizer: nn.optimizer criterion: nn.XXLoss - lr_schedule: list of float, lr schdeule epoch: int, current epoch total_epochs: int, total num of epochs total_batch: int, total num of batches for one epoch @@ -142,35 +182,49 @@ def train(dataloader, master_loss_meter = AverageMeter() master_acc_meter = AverageMeter() + time_st = time.time() + if amp is True: scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 - time_st = time.time() + optimizer.clear_grad() for batch_id, data in enumerate(dataloader): # get data images = data[0] label = data[1] label_orig = label.clone() + batch_size = images.shape[0] if mixup_fn is not None: images, label = mixup_fn(images, label_orig) - # set per iteration lr using scheduler - global_train_iter = total_batch * (epoch - 1) + batch_id # epoch starts from 1 - optimizer.set_lr(lr_schedule[global_train_iter]) + if batch_id % accum_iter == 0: + lr = adjust_learning_rate(optimizer, + base_lr, + min_lr, + batch_id / total_batch + epoch - 1, + warmup_epochs, + total_epochs) # forward with paddle.amp.auto_cast(amp is True): output = model(images) loss = criterion(output, label) + loss_value = loss.item() + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + loss = loss / accum_iter + if not amp: # fp32 loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): optimizer.step() optimizer.clear_grad() else: - scaled = scaler.scale(loss) - scaled.backward() + scaled_loss = scaler.scale(loss) + scaled_loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 scaler.step(optimizer) @@ -179,39 
+233,36 @@ def train(dataloader, pred = F.softmax(output) if mixup_fn: - acc = paddle.metric.accuracy(pred, label_orig) + acc = paddle.metric.accuracy(pred, label_orig).item() else: - acc = paddle.metric.accuracy(pred, label_orig.unsqueeze(1)) + acc = paddle.metric.accuracy(pred, label_orig.unsqueeze(1)).item() # sync from other gpus for overall loss and acc - batch_size = paddle.to_tensor(images.shape[0]) - master_loss = paddle.to_tensor(loss.numpy()) - master_acc = paddle.to_tensor(acc.numpy()) - master_batch_size = paddle.to_tensor(batch_size.numpy()) - dist.all_reduce(master_loss) - dist.all_reduce(master_acc) - dist.all_reduce(master_batch_size) - master_loss = master_loss / dist.get_world_size() - master_acc = master_acc / dist.get_world_size() - - master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) - master_acc_meter.update(master_acc.numpy()[0], master_batch_size.numpy()[0]) - - train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) - train_acc_meter.update(acc.numpy()[0], batch_size.numpy()[0]) - if batch_id % debug_steps == 0: + master_loss = all_reduce_mean(loss_value) + master_acc = all_reduce_mean(acc) + master_batch_size = all_reduce_mean(batch_size) + + master_loss_meter.update(master_loss, master_batch_size) + master_acc_meter.update(master_acc, master_batch_size) + train_loss_meter.update(loss_value, batch_size) + train_acc_meter.update(acc, batch_size) + + if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader): local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {train_loss_meter.avg:.4f}, " + + f"Lr: {optimizer.get_lr():04f}, " + + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), " + f"Avg Acc: {train_acc_meter.avg:.4f}") master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {master_loss_meter.avg:.4f}, " + + f"Lr: {optimizer.get_lr():04f}, " + + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), " + f"Avg Acc: {master_acc_meter.avg:.4f}") write_log(local_logger, master_logger, local_message, master_message) train_time = time.time() - time_st + dist.barrier() return (train_loss_meter.avg, train_acc_meter.avg, master_loss_meter.avg, @@ -258,33 +309,28 @@ def validate(dataloader, # get data images = data[0] label = data[1] + batch_size = images.shape[0] output = model(images) loss = criterion(output, label) + loss_value = loss.item() pred = F.softmax(output) - acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)) - acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5) + acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item() + acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item() # sync from other gpus for overall loss and acc - batch_size = paddle.to_tensor(images.shape[0]) - master_loss = paddle.to_tensor(loss.numpy()) - master_acc1 = paddle.to_tensor(acc1.numpy()) - master_acc5 = paddle.to_tensor(acc5.numpy()) - master_batch_size = paddle.to_tensor(batch_size.numpy()) - dist.all_reduce(master_loss) - dist.all_reduce(master_batch_size) - dist.all_reduce(master_acc1) - dist.all_reduce(master_acc5) - master_loss = master_loss / dist.get_world_size() - master_acc1 = master_acc1 / dist.get_world_size() - master_acc5 = master_acc5 / dist.get_world_size() - master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) - master_acc1_meter.update(master_acc1.numpy()[0], master_batch_size.numpy()[0]) - 
master_acc5_meter.update(master_acc5.numpy()[0], master_batch_size.numpy()[0]) - val_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) - val_acc1_meter.update(acc1.numpy()[0], batch_size.numpy()[0]) - val_acc5_meter.update(acc5.numpy()[0], batch_size.numpy()[0]) + master_loss = all_reduce_mean(loss_value) + master_acc1 = all_reduce_mean(acc1) + master_acc5 = all_reduce_mean(acc5) + master_batch_size = all_reduce_mean(batch_size) + + master_loss_meter.update(master_loss, master_batch_size) + master_acc1_meter.update(master_acc1, master_batch_size) + master_acc5_meter.update(master_acc5, master_batch_size) + val_loss_meter.update(loss_value, batch_size) + val_acc1_meter.update(acc1, batch_size) + val_acc5_meter.update(acc5, batch_size) if batch_id % debug_steps == 0: local_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " + @@ -298,6 +344,7 @@ def validate(dataloader, write_log(local_logger, master_logger, local_message, master_message) val_time = time.time() - time_st + dist.barrier() return (val_loss_meter.avg, val_acc1_meter.avg, val_acc5_meter.avg, @@ -319,23 +366,18 @@ def main_worker(*args): np.random.seed(seed) random.seed(seed) # logger for each process/gpu - local_logger = get_logger( - filename=os.path.join(config.SAVE, 'log_{}.txt'.format(local_rank)), - logger_name='local_logger') - # overall logger - if local_rank == 0: - master_logger = get_logger( - filename=os.path.join(config.SAVE, 'log.txt'), - logger_name='master_logger') - master_logger.info(f'\n{config}') - else: - master_logger = None - + local_logger, master_logger = get_logger(config.SAVE) message = f'----- world_size = {world_size}, local_rank = {local_rank}' write_log(local_logger, master_logger, message) # STEP 1: Create model + paddle.device.set_device('gpu') model = build_model(config) + if dist.get_world_size() > 1: + strategy = fleet.DistributedStrategy() + ## Hybrid Parallel Training + strategy.hybrid_configs = {} + fleet.init(is_collective=True, strategy=strategy) # STEP 2: Create train and val dataloader if not config.EVAL: @@ -372,58 +414,58 @@ def main_worker(*args): # only use cross entropy for val criterion_val = nn.CrossEntropyLoss() - - # STEP 4: Define optimizer and lr_scheduler + # STEP 5: Define optimizer and lr_scheduler # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) if not config.EVAL: if config.TRAIN.LINEAR_SCALED_LR is not None: - linear_scaled_lr = ( - config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - #linear_scaled_warmup_start_lr = ( - # config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - #linear_scaled_end_lr = ( - # config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR + effective_batch_size = config.DATA.BATCH_SIZE * config.TRAIN.ACCUM_ITER * world_size + config.TRAIN.BASE_LR = ( + config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR + ) + write_log(local_logger, master_logger, f'Base lr is scaled to: {config.TRAIN.BASE_LR}') - if config.TRAIN.ACCUM_ITER > 1: - linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER - #linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER - #linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER - - config.TRAIN.BASE_LR = linear_scaled_lr - #config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr - #config.TRAIN.END_LR = linear_scaled_end_lr - - lr_schedule = 
cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale - config.TRAIN.END_LR, - config.TRAIN.NUM_EPOCHS, - len(dataloader_train), - warmup_epochs=config.TRAIN.WARMUP_EPOCHS) - - #params_groups = get_params_groups(model) - params_groups = lr_decay.param_groups_lrd( - model=model, - weight_decay=config.TRAIN.WEIGHT_DECAY, - layer_decay=config.TRAIN.LAYER_DECAY) if config.TRAIN.GRAD_CLIP: clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) else: clip = None - if config.TRAIN.OPTIMIZER.NAME == "SGD": - optimizer = paddle.optimizer.Momentum( - parameters=params_groups, - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, - weight_decay=0.0, #config.TRAIN.WEIGHT_DECAY, set by params_groups - momentum=config.TRAIN.OPTIMIZER.MOMENTUM, - grad_clip=clip) - elif config.TRAIN.OPTIMIZER.NAME == "AdamW": + if config.TRAIN.OPTIMIZER.NAME == "AdamW": + params_groups = lr_decay.param_groups_lrd( + model=model, + no_weight_decay_list=['encoder_position_embedding', 'cls_token'], + weight_decay=config.TRAIN.WEIGHT_DECAY, + layer_decay=config.TRAIN.LAYER_DECAY) optimizer = paddle.optimizer.AdamW( parameters=params_groups, - learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + learning_rate=config.TRAIN.BASE_LR, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + beta1=config.TRAIN.OPTIMIZER.BETAS[0], + beta2=config.TRAIN.OPTIMIZER.BETAS[1], + weight_decay=config.TRAIN.WEIGHT_DECAY, # set by params_groups, this vaule is not effectitve + epsilon=config.TRAIN.OPTIMIZER.EPS, + grad_clip=clip) + elif config.TRAIN.OPTIMIZER.NAME == "AdamWDL": + name_dict = dict() + wd_exclude_list = ['encoder_position_embedding', 'cls_token'] + for n, p in model.named_parameters(): + # name_dict is for AdamWDL argument 'name_dict' + name_dict[p.name] = n + # add no decay param name to weight exclude list, for AramWDL argument 'apply_decay_param_fn' + if p.ndim == 1 or n.endswith('.bias'): + wd_exclude_list.append(n) + print('no_decay param names: ', wd_exclude_list) + + optimizer = paddlenlp.ops.optimizer.AdamWDL( + learning_rate=config.TRAIN.BASE_LR, + weight_decay=config.TRAIN.WEIGHT_DECAY, + layerwise_decay=config.TRAIN.LAYER_DECAY, + n_layers=config.MODEL.TRANS.ENCODER.DEPTH, + set_param_lr_fun=lr_decay.lr_setting, + parameters=model.parameters(), + name_dict=name_dict, + apply_decay_param_fun=get_exclude_from_weight_decay_fn(wd_exclude_list), beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], - weight_decay=0.0, #config.TRAIN.WEIGHT_DECAY, set by params_groups epsilon=config.TRAIN.OPTIMIZER.EPS, grad_clip=clip) else: @@ -431,24 +473,29 @@ def main_worker(*args): write_log(local_logger, master_logger, message, None, 'fatal') raise NotImplementedError(message) - # STEP 5: Load pretrained model / load resumt model and optimizer states + # STEP 6: Load pretrained model / load resumt model and optimizer states if config.MODEL.PRETRAINED: assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True model_state = paddle.load(config.MODEL.PRETRAINED + '.pdparams') if not config.EVAL: - keys = ['encoder_norm.weight', 'encoder_norm.bias', + keys = ['encoder.norm.weight', 'encoder.norm.bias', 'classfier.weight', 'classifier.bias'] if config.MODEL.GLOBAL_POOL: - del model_state[keys[0]] - del model_state[keys[1]] + if keys[0] in model_state: + del model_state[keys[0]] + if keys[1] in model_state: + del model_state[keys[1]] + if keys[2] in model_state: + del model_state[keys[2]] + if keys[3] in model_state: + del 
model_state[keys[3]] # interpolate position embedding interpolate_pos_embed(model, model_state) - model.set_state_dict(model_state) - message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" + message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}.pdparams" write_log(local_logger, master_logger, message) if config.MODEL.RESUME: @@ -458,13 +505,13 @@ def main_worker(*args): model.set_dict(model_state) opt_state = paddle.load(config.MODEL.RESUME+'.pdopt') optimizer.set_state_dict(opt_state) - message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}" + message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}.pdparams/.pdopts" write_log(local_logger, master_logger, message) + + if dist.get_world_size() > 1: + model = fleet.distributed_model(model) - # enable data parallel for distributed - model = paddle.DataParallel(model) - - # STEP 6: Validation (eval mode) + # STEP 7: Validation (eval mode) if config.EVAL: write_log(local_logger, master_logger, f"----- Start Validation") val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( @@ -502,8 +549,10 @@ def main_worker(*args): model=model, optimizer=optimizer, criterion=criterion, - lr_schedule=lr_schedule, + base_lr=config.TRAIN.BASE_LR, + min_lr=config.TRAIN.END_LR, epoch=epoch, + warmup_epochs=config.TRAIN.WARMUP_EPOCHS, total_epochs=config.TRAIN.NUM_EPOCHS, total_batch=total_batch_train, debug_steps=config.REPORT_FREQ, @@ -514,11 +563,13 @@ def main_worker(*args): master_logger=master_logger) local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Lr: {optimizer.get_lr():.4f}, " + f"Train Loss: {train_loss:.4f}, " + f"Train Acc: {train_acc:.4f}, " + f"time: {train_time:.2f}") master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Lr: {optimizer.get_lr():.4f}, " + f"Train Loss: {avg_loss:.4f}, " + f"Train Acc: {avg_acc:.4f}, " + f"time: {train_time:.2f}") @@ -531,7 +582,7 @@ def main_worker(*args): dataloader=dataloader_val, model=model, criterion=criterion_val, - total_batch=total_batch_train, + total_batch=total_batch_val, debug_steps=config.REPORT_FREQ, local_logger=local_logger, master_logger=master_logger) @@ -573,15 +624,17 @@ def main(): config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) if not os.path.exists(config.SAVE): os.makedirs(config.SAVE, exist_ok=True) - # get dataset - if not config.EVAL: - dataset_train = get_dataset(config, mode='train') - else: + # get train dataset if in train mode + if config.EVAL: dataset_train = None + else: + dataset_train = get_dataset(config, mode='train') + # get val dataset dataset_val = get_dataset(config, mode='val') # start training - config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS - dist.spawn(main_worker, args=(config, dataset_train, dataset_val), nprocs=config.NGPUS) + #config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS + #dist.spawn(main_worker, args=(config, dataset_train, dataset_val), nprocs=config.NGPUS) + main_worker(config, dataset_train, dataset_val) if __name__ == "__main__": diff --git a/image_classification/MAE/main_multi_gpu_linearprobe.py b/image_classification/MAE/main_multi_gpu_linearprobe.py deleted file mode 100644 index e7f2cfa1..00000000 --- a/image_classification/MAE/main_multi_gpu_linearprobe.py +++ /dev/null @@ -1,563 +0,0 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. 
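For reference, the finetune optimizer set up above relies on layer-wise learning-rate decay (lr_decay.param_groups_lrd, or AdamWDL's layerwise_decay). The grouping code itself lives in lr_decay.py and is not part of this hunk; the snippet below is only an illustrative sketch of the usual scaling rule, using a hypothetical helper name:

def layer_lr_scales(num_blocks, layer_decay):
    # illustration only: one lr scale per depth level, embeddings first, classifier head last;
    # deeper blocks train with a larger fraction of the base lr
    n = num_blocks + 1
    return [layer_decay ** (n - i) for i in range(n + 1)]

# e.g. 12 encoder blocks with layer_decay=0.75: head scale is 1.0, patch/pos embedding scale is 0.75**13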
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""MAE linear probing using multiple GPU """ - -import sys -import os -import time -import logging -import argparse -import random -import numpy as np -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import paddle.distributed as dist -from datasets import get_dataloader -from datasets import get_dataset -from losses import LabelSmoothingCrossEntropyLoss -from losses import SoftTargetCrossEntropyLoss -from transformer import build_transformer as build_model -from utils import AverageMeter -from utils import WarmupCosineScheduler -from utils import get_exclude_from_weight_decay_fn -from utils import get_params_groups -from utils import cosine_scheduler -from utils import interpolate_pos_embed -from config import get_config -from config import update_config - - -def get_arguments(): - """return argumeents, this will overwrite the config after loading yaml file""" - parser = argparse.ArgumentParser('MAE') - parser.add_argument('-cfg', type=str, default=None) - parser.add_argument('-dataset', type=str, default=None) - parser.add_argument('-batch_size', type=int, default=None) - parser.add_argument('-image_size', type=int, default=None) - parser.add_argument('-data_path', type=str, default=None) - parser.add_argument('-output', type=str, default=None) - parser.add_argument('-ngpus', type=int, default=None) - parser.add_argument('-pretrained', type=str, default=None) - parser.add_argument('-resume', type=str, default=None) - parser.add_argument('-last_epoch', type=int, default=None) - parser.add_argument('-eval', action='store_true') - parser.add_argument('-amp', action='store_true') - arguments = parser.parse_args() - return arguments - - -def get_logger(filename, logger_name=None): - """set logging file and format - Args: - filename: str, full path of the logger file to write - logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' - Return: - logger: python logger - """ - log_format = "%(asctime)s %(message)s" - logging.basicConfig(stream=sys.stdout, level=logging.INFO, - format=log_format, datefmt="%m%d %I:%M:%S %p") - # different name is needed when creating multiple logger in one process - logger = logging.getLogger(logger_name) - fh = logging.FileHandler(os.path.join(filename)) - fh.setFormatter(logging.Formatter(log_format)) - logger.addHandler(fh) - return logger - - -def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): - if local_logger: - if level == 'info': - local_logger.info(msg_local) - elif level == 'fatal': - local_logger.fatal(msg_local) - else: - raise ValueError("level must in ['info', 'fatal']") - if master_logger and dist.get_rank() == 0: - if msg_master is None: - msg_master = msg_local - if level == 'info': - master_logger.info("MASTER_LOG " + msg_master) - elif level == 'fatal': - master_logger.fatal("MASTER_LOG " + msg_master) - else: - raise ValueError("level must in ['info', 'fatal']") - - -def train(dataloader, - model, - optimizer, - criterion, - 
lr_schedule, - epoch, - total_epochs, - total_batch, - debug_steps=100, - accum_iter=1, - amp=False, - local_logger=None, - master_logger=None): - """Training for one epoch - Args: - dataloader: paddle.io.DataLoader, dataloader instance - model: nn.Layer, a ViT model - optimizer: nn.optimizer - criterion: nn.XXLoss - lr_schedule: list of float, lr schdeule - epoch: int, current epoch - total_epochs: int, total num of epochs - total_batch: int, total num of batches for one epoch - debug_steps: int, num of iters to log info, default: 100 - accum_iter: int, num of iters for accumulating gradients, default: 1 - amp: bool, if True, use mix precision training, default: False - local_logger: logger for local process/gpu, default: None - master_logger: logger for main process, default: None - Returns: - train_loss_meter.avg: float, average loss on current process/gpu - train_acc_meter.avg: float, average acc@1 on current process/gpu - master_loss_meter.avg: float, average loss on all processes/gpus - master_acc_meter.avg: float, average acc@1 on all processes/gpus - train_time: float, training time - """ - model.train() - train_loss_meter = AverageMeter() - train_acc_meter = AverageMeter() - master_loss_meter = AverageMeter() - master_acc_meter = AverageMeter() - - if amp is True: - scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 - time_st = time.time() - - for batch_id, data in enumerate(dataloader): - # get data - images = data[0] - label = data[1] - - # set per iteration lr using scheduler - global_train_iter = total_batch * (epoch - 1) + batch_id # epoch starts from 1 - optimizer.set_lr(lr_schedule[global_train_iter]) - # forward - with paddle.amp.auto_cast(amp is True): - output = model(images) - loss = criterion(output, label) - - if not amp: # fp32 - loss.backward() - if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - optimizer.step() - optimizer.clear_grad() - else: - scaled = scaler.scale(loss) - scaled.backward() - if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 - scaler.step(optimizer) - scaler.update() - optimizer.clear_grad() - - pred = F.softmax(output) - acc = paddle.metric.accuracy(pred, label.unsqueeze(1)) - - # sync from other gpus for overall loss and acc - batch_size = paddle.to_tensor(images.shape[0]) - master_loss = paddle.to_tensor(loss.numpy()) - master_acc = paddle.to_tensor(acc.numpy()) - master_batch_size = paddle.to_tensor(batch_size.numpy()) - dist.all_reduce(master_loss) - dist.all_reduce(master_acc) - dist.all_reduce(master_batch_size) - master_loss = master_loss / dist.get_world_size() - master_acc = master_acc / dist.get_world_size() - master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) - master_acc_meter.update(master_acc.numpy()[0], master_batch_size.numpy()[0]) - - train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) - train_acc_meter.update(acc.numpy()[0], batch_size.numpy()[0]) - - if batch_id % debug_steps == 0: - local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {train_loss_meter.avg:.4f}, " + - f"Avg Acc: {train_acc_meter.avg:.4f}") - master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {master_loss_meter.avg:.4f}, " + - f"Avg Acc: {master_acc_meter.avg:.4f}") - write_log(local_logger, master_logger, local_message, 
master_message) - - train_time = time.time() - time_st - return (train_loss_meter.avg, - train_acc_meter.avg, - master_loss_meter.avg, - master_acc_meter.avg, - train_time) - - -@paddle.no_grad() -def validate(dataloader, - model, - criterion, - total_batch, - debug_steps=100, - local_logger=None, - master_logger=None): - """Validation for the whole dataset - Args: - dataloader: paddle.io.DataLoader, dataloader instance - model: nn.Layer, a ViT model - total_batch: int, total num of batches for one epoch - debug_steps: int, num of iters to log info, default: 100 - local_logger: logger for local process/gpu, default: None - master_logger: logger for main process, default: None - Returns: - val_loss_meter.avg: float, average loss on current process/gpu - val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus - val_acc5_meter.avg: float, average top5 accuracy on current processes/gpus - master_loss_meter.avg: float, average loss on all processes/gpus - master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus - master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus - val_time: float, validation time - """ - model.eval() - val_loss_meter = AverageMeter() - val_acc1_meter = AverageMeter() - val_acc5_meter = AverageMeter() - master_loss_meter = AverageMeter() - master_acc1_meter = AverageMeter() - master_acc5_meter = AverageMeter() - - time_st = time.time() - - for batch_id, data in enumerate(dataloader): - # get data - images = data[0] - label = data[1] - - output = model(images) - loss = criterion(output, label) - - pred = F.softmax(output) - acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)) - acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5) - - # sync from other gpus for overall loss and acc - batch_size = paddle.to_tensor(images.shape[0]) - master_loss = paddle.to_tensor(loss.numpy()) - master_acc1 = paddle.to_tensor(acc1.numpy()) - master_acc5 = paddle.to_tensor(acc5.numpy()) - master_batch_size = paddle.to_tensor(batch_size.numpy()) - dist.all_reduce(master_loss) - dist.all_reduce(master_batch_size) - dist.all_reduce(master_acc1) - dist.all_reduce(master_acc5) - master_loss = master_loss / dist.get_world_size() - master_acc1 = master_acc1 / dist.get_world_size() - master_acc5 = master_acc5 / dist.get_world_size() - master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) - master_acc1_meter.update(master_acc1.numpy()[0], master_batch_size.numpy()[0]) - master_acc5_meter.update(master_acc5.numpy()[0], master_batch_size.numpy()[0]) - val_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) - val_acc1_meter.update(acc1.numpy()[0], batch_size.numpy()[0]) - val_acc5_meter.update(acc5.numpy()[0], batch_size.numpy()[0]) - - if batch_id % debug_steps == 0: - local_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {val_loss_meter.avg:.4f}, " + - f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " + - f"Avg Acc@5: {val_acc5_meter.avg:.4f}") - master_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Avg Loss: {master_loss_meter.avg:.4f}, " + - f"Avg Acc@1: {master_acc1_meter.avg:.4f}, " + - f"Avg Acc@5: {master_acc5_meter.avg:.4f}") - write_log(local_logger, master_logger, local_message, master_message) - - val_time = time.time() - time_st - return (val_loss_meter.avg, - val_acc1_meter.avg, - val_acc5_meter.avg, - master_loss_meter.avg, - master_acc1_meter.avg, - master_acc5_meter.avg, - val_time) - - -def main_worker(*args): - # STEP 0: Preparation - dist.init_parallel_env() - 
world_size = dist.get_world_size() - local_rank = dist.get_rank() - config = args[0] - last_epoch = config.TRAIN.LAST_EPOCH - seed = config.SEED + local_rank - paddle.seed(seed) - np.random.seed(seed) - random.seed(seed) - # logger for each process/gpu - local_logger = get_logger( - filename=os.path.join(config.SAVE, 'log_{}.txt'.format(local_rank)), - logger_name='local_logger') - # overall logger - if local_rank == 0: - master_logger = get_logger( - filename=os.path.join(config.SAVE, 'log.txt'), - logger_name='master_logger') - master_logger.info(f'\n{config}') - else: - master_logger = None - - message = f'----- world_size = {world_size}, local_rank = {local_rank}' - write_log(local_logger, master_logger, message) - - # STEP 1: Create model - model = build_model(config) - - # STEP 2: Create train and val dataloader - if not config.EVAL: - dataset_train = args[1] - dataloader_train = get_dataloader(config, dataset_train, 'train', True) - total_batch_train = len(dataloader_train) - message = f'----- Total # of train batch (single gpu): {total_batch_train}' - write_log(local_logger, master_logger, message) - - dataset_val = args[2] - dataloader_val = get_dataloader(config, dataset_val, 'val', True) - total_batch_val = len(dataloader_val) - message = f'----- Total # of val batch (single gpu): {total_batch_val}' - write_log(local_logger, master_logger, message) - - # STEP 4: Define criterion - criterion = nn.CrossEntropyLoss() - # only use cross entropy for val - criterion_val = nn.CrossEntropyLoss() - - # STEP 4: Define optimizer and lr_scheduler - # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) - if not config.EVAL: - if config.TRAIN.LINEAR_SCALED_LR is not None: - linear_scaled_lr = ( - config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - #linear_scaled_warmup_start_lr = ( - # config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - #linear_scaled_end_lr = ( - # config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - - if config.TRAIN.ACCUM_ITER > 1: - linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER - #linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER - #linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER - - config.TRAIN.BASE_LR = linear_scaled_lr - #config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr - #config.TRAIN.END_LR = linear_scaled_end_lr - - lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale - config.TRAIN.END_LR, - config.TRAIN.NUM_EPOCHS, - len(dataloader_train), - warmup_epochs=config.TRAIN.WARMUP_EPOCHS) - - params_groups = get_params_groups(model) - - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None - - if config.TRAIN.OPTIMIZER.NAME == "SGD": - optimizer = paddle.optimizer.Momentum( - parameters=params_groups, - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, - weight_decay=config.TRAIN.WEIGHT_DECAY, - momentum=config.TRAIN.OPTIMIZER.MOMENTUM, - grad_clip=clip) - elif config.TRAIN.OPTIMIZER.NAME == "AdamW": - optimizer = paddle.optimizer.AdamW( - parameters=params_groups, - learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, - beta1=config.TRAIN.OPTIMIZER.BETAS[0], - beta2=config.TRAIN.OPTIMIZER.BETAS[1], - weight_decay=config.TRAIN.WEIGHT_DECAY, - 
epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip) - else: - message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." - write_log(local_logger, master_logger, message, None, 'fatal') - raise NotImplementedError(message) - - # STEP 5: Load pretrained model / load resumt model and optimizer states - if config.MODEL.PRETRAINED: - assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True - model_state = paddle.load(config.MODEL.PRETRAINED + '.pdparams') - - if not config.EVAL: - keys = ['encoder.norm.weight', 'encoder.norm.bias', - 'classfier.weight', 'classifier.bias'] - if config.MODEL.GLOBAL_POOL: - del model_state[keys[0]] - del model_state[keys[1]] - - # interpolate position embedding - interpolate_pos_embed(model, model_state) - - model.set_state_dict(model_state) - message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" - write_log(local_logger, master_logger, message) - - # for linearprobing - model.classifier = nn.Sequential( - nn.BatchNorm1D(model.classifier.weight.shape[0], weight_attr=False, epsilon=1e-6), - model.classifier) - # freeze all but the classifier - for _, p in model.named_parameters(): - p.stop_gradient = True - # set classifier trainable - for _, p in model.classifier.named_parameters(): - p.stop_gradient = False - - if config.MODEL.RESUME: - assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True - assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True - model_state = paddle.load(config.MODEL.RESUME+'.pdparams') - model.set_dict(model_state) - opt_state = paddle.load(config.MODEL.RESUME+'.pdopt') - optimizer.set_state_dict(opt_state) - message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}" - write_log(local_logger, master_logger, message) - - # enable data paralle for distributed - model = paddle.DataParallel(model) - - # STEP 6: Validation (eval mode) - if config.EVAL: - write_log(local_logger, master_logger, f"----- Start Validation") - val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( - dataloader=dataloader_val, - model=model, - criterion=criterion_val, - total_batch=total_batch_train, - debug_steps=config.REPORT_FREQ, - local_logger=local_logger, - master_logger=master_logger) - - local_message = (f"----- Validation: " + - f"Validation Loss: {val_loss:.4f}, " + - f"Validation Acc@1: {val_acc1:.4f}, " + - f"Validation Acc@1: {val_acc5:.4f}, " + - f"time: {val_time:.2f}") - - master_message = (f"----- Validation: " + - f"Validation Loss: {avg_loss:.4f}, " + - f"Validation Acc@1: {avg_acc1:.4f}, " + - f"Validation Acc@1: {avg_acc5:.4f}, " + - f"time: {val_time:.2f}") - write_log(local_logger, master_logger, local_message, master_message) - return - - # STEP 7: Start training (train mode) - write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.") - for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): - # train - write_log(local_logger, master_logger, f"Train epoch {epoch}. 
LR={optimizer.get_lr():.6e}") - - train_loss, train_acc, avg_loss, avg_acc, train_time = train( - dataloader=dataloader_train, - model=model, - optimizer=optimizer, - criterion=criterion, - lr_schedule=lr_schedule, - epoch=epoch, - total_epochs=config.TRAIN.NUM_EPOCHS, - total_batch=total_batch_train, - debug_steps=config.REPORT_FREQ, - accum_iter=config.TRAIN.ACCUM_ITER, - amp=config.AMP, - local_logger=local_logger, - master_logger=master_logger) - - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {train_loss:.4f}, " + - f"Train Acc: {train_acc:.4f}, " + - f"time: {train_time:.2f}") - - master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {avg_loss:.4f}, " + - f"Train Acc: {avg_acc:.4f}, " + - f"time: {train_time:.2f}") - write_log(local_logger, master_logger, local_message, master_message) - - # validation - if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: - write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}') - val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( - dataloader=dataloader_val, - model=model, - criterion=criterion_val, - total_batch=total_batch_train, - debug_steps=config.REPORT_FREQ, - local_logger=local_logger, - master_logger=master_logger) - - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Validation Loss: {val_loss:.4f}, " + - f"Validation Acc@1: {val_acc1:.4f}, " + - f"Validation Acc@1: {val_acc5:.4f}, " + - f"time: {val_time:.2f}") - - master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Validation Loss: {avg_loss:.4f}, " + - f"Validation Acc@1: {avg_acc1:.4f}, " + - f"Validation Acc@1: {avg_acc5:.4f}, " + - f"time: {val_time:.2f}") - write_log(local_logger, master_logger, local_message, master_message) - - # model save - if local_rank == 0: - if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: - model_path = os.path.join( - config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}") - paddle.save(model.state_dict(), model_path + '.pdparams') - paddle.save(optimizer.state_dict(), model_path + '.pdopt') - message = (f"----- Save model: {model_path}.pdparams \n" + - f"----- Save optim: {model_path}.pdopt") - write_log(local_logger, master_logger, message) - - -def main(): - # config is updated by: (1) config.py, (2) yaml file, (3) arguments - arguments = get_arguments() - config = get_config() - config = update_config(config, arguments) - # set output folder - if not config.EVAL: - config.SAVE = '{}/linearprobe-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - else: - config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - if not os.path.exists(config.SAVE): - os.makedirs(config.SAVE, exist_ok=True) - # get dataset - if not config.EVAL: - dataset_train = get_dataset(config, mode='train') - else: - dataset_train = None - dataset_val = get_dataset(config, mode='val') - # start training - config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS - dist.spawn(main_worker, args=(config, dataset_train, dataset_val), nprocs=config.NGPUS) - - -if __name__ == "__main__": - main() diff --git a/image_classification/MAE/main_multi_gpu_pretrain.py b/image_classification/MAE/main_multi_gpu_pretrain.py deleted file mode 100644 index 7e7aa134..00000000 --- a/image_classification/MAE/main_multi_gpu_pretrain.py +++ /dev/null @@ -1,361 +0,0 @@ -# Copyright (c) 
2021 PPViT Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""MAE pre-training using multiple GPU """ - -import sys -import os -import time -import logging -import argparse -import random -import numpy as np -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import paddle.distributed as dist -from datasets import get_dataloader -from datasets import get_dataset -from transformer import build_mae_pretrain as build_model -from utils import AverageMeter -from utils import WarmupCosineScheduler -from utils import get_exclude_from_weight_decay_fn -from utils import get_params_groups -from utils import cosine_scheduler -from config import get_config -from config import update_config - - -def get_arguments(): - """return argumeents, this will overwrite the config after loading yaml file""" - parser = argparse.ArgumentParser('MAE') - parser.add_argument('-cfg', type=str, default=None) - parser.add_argument('-dataset', type=str, default=None) - parser.add_argument('-batch_size', type=int, default=None) - parser.add_argument('-image_size', type=int, default=None) - parser.add_argument('-data_path', type=str, default=None) - parser.add_argument('-output', type=str, default=None) - parser.add_argument('-ngpus', type=int, default=None) - parser.add_argument('-pretrained', type=str, default=None) - parser.add_argument('-resume', type=str, default=None) - parser.add_argument('-last_epoch', type=int, default=None) - parser.add_argument('-eval', action='store_true') - parser.add_argument('-amp', action='store_true') - arguments = parser.parse_args() - return arguments - - -def get_logger(filename, logger_name=None): - """set logging file and format - Args: - filename: str, full path of the logger file to write - logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' - Return: - logger: python logger - """ - log_format = "%(asctime)s %(message)s" - logging.basicConfig(stream=sys.stdout, level=logging.INFO, - format=log_format, datefmt="%m%d %I:%M:%S %p") - # different name is needed when creating multiple logger in one process - logger = logging.getLogger(logger_name) - fh = logging.FileHandler(os.path.join(filename)) - fh.setFormatter(logging.Formatter(log_format)) - logger.addHandler(fh) - return logger - - -def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): - if local_logger: - if level == 'info': - local_logger.info(msg_local) - elif level == 'fatal': - local_logger.fatal(msg_local) - else: - raise ValueError("level must in ['info', 'fatal']") - if master_logger and dist.get_rank() == 0: - if msg_master is None: - msg_master = msg_local - if level == 'info': - master_logger.info("MASTER_LOG " + msg_master) - elif level == 'fatal': - master_logger.fatal("MASTER_LOG " + msg_master) - else: - raise ValueError("level must in ['info', 'fatal']") - - -def train(dataloader, - model, - mask_ratio, - optimizer, - lr_schedule, - epoch, - total_epochs, - total_batch, - debug_steps=100, - accum_iter=1, - 
amp=False, - local_logger=None, - master_logger=None): - """Training for one epoch - Args: - dataloader: paddle.io.DataLoader, dataloader instance - model: nn.Layer, a ViT model - mask_ratio: float, percentage of masking patches - optimizer: nn.optimizer - lr_schedule: list of float, lr schdeule - epoch: int, current epoch - total_epochs: int, total num of epochs - total_batch: int, total num of batches for one epoch - debug_steps: int, num of iters to log info, default: 100 - accum_iter: int, num of iters for accumulating gradients, default: 1 - amp: bool, if True, use mix precision training, default: False - local_logger: logger for local process/gpu, default: None - master_logger: logger for main process, default: None - Returns: - train_loss_meter.avg: float, average loss on current process/gpu - master_loss_meter.avg: float, average loss on all processes/gpus - train_time: float, training time - """ - model.train() - train_loss_meter = AverageMeter() - master_loss_meter = AverageMeter() - - if amp is True: - scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 - time_st = time.time() - - for batch_id, data in enumerate(dataloader): - # get data - images = data[0] - # set per iteration lr using scheduler - global_train_iter = total_batch * (epoch - 1) + batch_id # epoch starts from 1 - optimizer.set_lr(lr_schedule[global_train_iter]) - # forward - with paddle.amp.auto_cast(amp is True): - loss, _, _ = model(images, mask_ratio) - - if not amp: # fp32 - loss.backward() - if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - optimizer.step() - optimizer.clear_grad() - else: - scaled = scaler.scale(loss) - scaled.backward() - if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 - scaler.step(optimizer) - scaler.update() - optimizer.clear_grad() - - # sync from other gpus for overall loss and acc - batch_size = paddle.to_tensor(images.shape[0]) - master_loss = paddle.to_tensor(loss.numpy()) - master_batch_size = paddle.to_tensor(batch_size.numpy()) - dist.all_reduce(master_loss) - dist.all_reduce(master_batch_size) - master_loss = master_loss / dist.get_world_size() - master_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0]) - train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0]) - - if batch_id % debug_steps == 0: - local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"LR: {optimizer.get_lr():.6e}, " + - f"Avg Loss: {train_loss_meter.avg:.4f}") - master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"LR: {optimizer.get_lr():.6e}, " + - f"Avg Loss: {master_loss_meter.avg:.4f}") - write_log(local_logger, master_logger, local_message, master_message) - - train_time = time.time() - time_st - return train_loss_meter.avg, master_loss_meter.avg, train_time - - -def main_worker(*args): - # STEP 0: Preparation - dist.init_parallel_env() - world_size = dist.get_world_size() - local_rank = dist.get_rank() - config = args[0] - last_epoch = config.TRAIN.LAST_EPOCH - seed = config.SEED + local_rank - paddle.seed(seed) - np.random.seed(seed) - random.seed(seed) - # logger for each process/gpu - local_logger = get_logger( - filename=os.path.join(config.SAVE, 'log_{}.txt'.format(local_rank)), - logger_name='local_logger') - # overall logger - if local_rank == 0: - master_logger = get_logger( - 
filename=os.path.join(config.SAVE, 'log.txt'), - logger_name='master_logger') - master_logger.info(f'\n{config}') - else: - master_logger = None - - message = f'----- world_size = {world_size}, local_rank = {local_rank}' - write_log(local_logger, master_logger, message) - - # STEP 1: Create model - model = build_model(config) - model = paddle.DataParallel(model) - - # STEP 2: Create train and val dataloader - dataset_train = args[1] - dataloader_train = get_dataloader(config, dataset_train, 'train', True) - total_batch_train = len(dataloader_train) - message = f'----- Total # of train batch (single gpu): {total_batch_train}' - write_log(local_logger, master_logger, message) - - # STEP 3: Define criterion: loss is defined in model - #criterion = nn.MSELoss() - - # STEP 4: Define optimizer and lr_scheduler - # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) - if config.TRAIN.LINEAR_SCALED_LR is not None: - linear_scaled_lr = ( - config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - #linear_scaled_warmup_start_lr = ( - # config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - #linear_scaled_end_lr = ( - # config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR - - if config.TRAIN.ACCUM_ITER > 1: - linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER - #linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER - #linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER - - config.TRAIN.BASE_LR = linear_scaled_lr - #config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr - #config.TRAIN.END_LR = linear_scaled_end_lr - - lr_schedule = cosine_scheduler(config.TRAIN.BASE_LR, # add linear scale - config.TRAIN.END_LR, - config.TRAIN.NUM_EPOCHS, - len(dataloader_train), - warmup_epochs=config.TRAIN.WARMUP_EPOCHS) - - params_groups = get_params_groups(model) - - if config.TRAIN.GRAD_CLIP: - clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) - else: - clip = None - - if config.TRAIN.OPTIMIZER.NAME == "SGD": - optimizer = paddle.optimizer.Momentum( - parameters=params_groups, - learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR, - weight_decay=config.TRAIN.WEIGHT_DECAY, - momentum=config.TRAIN.OPTIMIZER.MOMENTUM, - grad_clip=clip) - elif config.TRAIN.OPTIMIZER.NAME == "AdamW": - optimizer = paddle.optimizer.AdamW( - parameters=params_groups, - learning_rate=0.0, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, - beta1=config.TRAIN.OPTIMIZER.BETAS[0], - beta2=config.TRAIN.OPTIMIZER.BETAS[1], - weight_decay=config.TRAIN.WEIGHT_DECAY, - epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip) - else: - message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." 
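The deleted drivers above precompute a per-step lr_schedule list with cosine_scheduler; the rewritten drivers instead call adjust_learning_rate (added to utils.py further down in this patch) with a fractional epoch once every accum_iter steps. A minimal standalone sketch of that rule, keeping the same argument meanings as in the patch (the function name here is changed for illustration):

import math

def warmup_then_half_cosine(base_lr, min_lr, cur_epoch, warmup_epochs, total_epochs):
    # cur_epoch is fractional (batch_id / total_batch + epoch - 1), so the lr moves every step
    if cur_epoch < warmup_epochs:
        return base_lr * cur_epoch / warmup_epochs
    progress = (cur_epoch - warmup_epochs) / (total_epochs - warmup_epochs)
    return min_lr + (base_lr - min_lr) * 0.5 * (1.0 + math.cos(math.pi * progress))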
- write_log(local_logger, master_logger, message, None, 'fatal') - raise NotImplementedError(message) - - # STEP 5: Load pretrained model / load resumt model and optimizer states - if config.MODEL.PRETRAINED: - assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True - model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams') - model.set_dict(model_state) - message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" - write_log(local_logger, master_logger, message) - - if config.MODEL.RESUME: - assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True - assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True - model_state = paddle.load(config.MODEL.RESUME+'.pdparams') - model.set_dict(model_state) - opt_state = paddle.load(config.MODEL.RESUME+'.pdopt') - optimizer.set_state_dict(opt_state) - message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}" - write_log(local_logger, master_logger, message) - if config.TRAIN.LAST_EPOCH == -1: - message = f"----- Resume Training: LAST_EPOCH should not be [-1]" - write_log(local_logger, master_logger, message, None, 'fatal') - - # STEP 6: Start training (train mode) - write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.") - for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): - # train - write_log(local_logger, master_logger, f"Train epoch {epoch}. LR={optimizer.get_lr():.6e}") - - train_loss, avg_loss, train_time = train( - dataloader=dataloader_train, - model=model, - mask_ratio=config.MODEL.TRANS.MASK_RATIO, - optimizer=optimizer, - lr_schedule=lr_schedule, - epoch=epoch, - total_epochs=config.TRAIN.NUM_EPOCHS, - total_batch=total_batch_train, - debug_steps=config.REPORT_FREQ, - accum_iter=config.TRAIN.ACCUM_ITER, - amp=config.AMP, - local_logger=local_logger, - master_logger=master_logger) - - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {train_loss:.4f}, " + - f"time: {train_time:.2f}") - - master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Train Loss: {avg_loss:.4f}, " + - f"time: {train_time:.2f}") - write_log(local_logger, master_logger, local_message, master_message) - - # model save - if local_rank == 0: - if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: - model_path = os.path.join( - config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}") - paddle.save(model.state_dict(), model_path + '.pdparams') - paddle.save(optimizer.state_dict(), model_path + '.pdopt') - message = (f"----- Save model: {model_path}.pdparams \n" + - f"----- Save optim: {model_path}.pdopt") - write_log(local_logger, master_logger, message) - - -def main(): - # config is updated by: (1) config.py, (2) yaml file, (3) arguments - arguments = get_arguments() - config = get_config() - config = update_config(config, arguments) - # set output folder - config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) - if not os.path.exists(config.SAVE): - os.makedirs(config.SAVE, exist_ok=True) - # get dataset - dataset_train = get_dataset(config, mode='train') - # start training - config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS - dist.spawn(main_worker, args=(config, dataset_train, ), nprocs=config.NGPUS) - - -if __name__ == "__main__": - main() diff --git a/image_classification/MAE/pos_embed.py b/image_classification/MAE/pos_embed.py new file mode 100644 index 00000000..c6c95853 --- 
/dev/null
+++ b/image_classification/MAE/pos_embed.py
@@ -0,0 +1,56 @@
+import numpy as np
+
+# --------------------------------------------------------
+# 2D sine-cosine position embedding
+# References:
+# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
+# MoCo v3: https://github.com/facebookresearch/moco-v3
+# --------------------------------------------------------
+def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
+    """
+    grid_size: int of the grid height and width
+    return:
+    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
+    """
+    grid_h = np.arange(grid_size, dtype=np.float32)
+    grid_w = np.arange(grid_size, dtype=np.float32)
+    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
+    grid = np.stack(grid, axis=0)
+
+    grid = grid.reshape([2, 1, grid_size, grid_size])
+    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+    if cls_token:
+        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
+    return pos_embed
+
+
+def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+    assert embed_dim % 2 == 0
+
+    # use half of dimensions to encode grid_h
+    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
+    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
+
+    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
+    return emb
+
+
+def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+    """
+    embed_dim: output dimension for each position
+    pos: a list of positions to be encoded: size (M,)
+    out: (M, D)
+    """
+    assert embed_dim % 2 == 0
+    omega = np.arange(embed_dim // 2, dtype=np.float)
+    omega /= embed_dim / 2.
+    omega = 1. / 10000**omega  # (D/2,)
+
+    pos = pos.reshape(-1)  # (M,)
+    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product
+
+    emb_sin = np.sin(out)  # (M, D/2)
+    emb_cos = np.cos(out)  # (M, D/2)
+
+    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
+    return emb
diff --git a/image_classification/MAE/run_finetune_multi.sh b/image_classification/MAE/run_finetune_multi.sh
deleted file mode 100644
index 174cd949..00000000
--- a/image_classification/MAE/run_finetune_multi.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
-python main_multi_gpu_finetune.py \
--cfg='./configs/vit_base_patch16_224_finetune_single_node.yaml' \
--dataset='imagenet2012' \
--batch_size=32 \
--data_path='/dataset/imagenet' \
--pretrained='./mae_pretrain_vit_base' \
--amp \
-#-eval
diff --git a/image_classification/MAE/run_finetune_vit_b.sh b/image_classification/MAE/run_finetune_vit_b.sh
new file mode 100644
index 00000000..12585cd1
--- /dev/null
+++ b/image_classification/MAE/run_finetune_vit_b.sh
@@ -0,0 +1,9 @@
+#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
+#python main_multi_gpu_finetune.py \
+GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_finetune.py \
+-cfg='./configs/vit_base_patch16_224_finetune.yaml' \
+-dataset='imagenet2012' \
+-batch_size=32 \
+-data_path='/dataset/imagenet' \
+-pretrained='./mae_vit_base_patch16' \
+-amp \
diff --git a/image_classification/MAE/run_finetune_vit_b_single_node.sh b/image_classification/MAE/run_finetune_vit_b_single_node.sh
new file mode 100644
index 00000000..79ba7514
--- /dev/null
+++ b/image_classification/MAE/run_finetune_vit_b_single_node.sh
@@ -0,0 +1,9 @@
+#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
+#python main_multi_gpu_finetune.py \
+GLOG_v=0 python3 -m
paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_finetune.py \ +-cfg='./configs/vit_base_patch16_224_finetune_single_node.yaml' \ +-dataset='imagenet2012' \ +-batch_size=32 \ +-data_path='/dataset/imagenet' \ +-pretrained='./mae_vit_base_patch16' \ +-amp \ diff --git a/image_classification/MAE/run_linear_probe_multi.sh b/image_classification/MAE/run_linear_probe_multi.sh deleted file mode 100644 index 2400bb3c..00000000 --- a/image_classification/MAE/run_linear_probe_multi.sh +++ /dev/null @@ -1,8 +0,0 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ -python main_multi_gpu_linearprobe.py \ --cfg='./configs/vit_base_patch16_224_linearprobe.yaml' \ --dataset='imagenet2012' \ --batch_size=512 \ --data_path='/dataset/imagenet' \ --amp \ --pretrained='./mae_pretrain_vit_base' diff --git a/image_classification/MAE/run_pretrain_multi.sh b/image_classification/MAE/run_pretrain_multi.sh deleted file mode 100644 index ec9bbe03..00000000 --- a/image_classification/MAE/run_pretrain_multi.sh +++ /dev/null @@ -1,7 +0,0 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ -python main_multi_gpu_pretrain.py \ --cfg='./configs/vit_base_patch16_224_pretrain_dec1.yaml' \ --dataset='imagenet2012' \ --batch_size=8 \ --data_path='/dataset/imagenet' \ --amp diff --git a/image_classification/MAE/run_pretrain_multi_resume.sh b/image_classification/MAE/run_pretrain_multi_resume.sh deleted file mode 100644 index 5eb4293c..00000000 --- a/image_classification/MAE/run_pretrain_multi_resume.sh +++ /dev/null @@ -1,9 +0,0 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ -python main_multi_gpu_pretrain.py \ --cfg='./configs/vit_base_patch16_224_pretrain_dec1.yaml' \ --dataset='imagenet2012' \ --batch_size=256 \ --data_path='/dataset/imagenet' \ --resume='./output/train-20220125-17-48-06/PRETRAIN-Epoch-99-Loss-0.5566961133140487' \ --last_epoch=99 \ --amp diff --git a/image_classification/MAE/stat_define.py b/image_classification/MAE/stat_define.py deleted file mode 100644 index 207cef5f..00000000 --- a/image_classification/MAE/stat_define.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import glob -import paddle -from config import get_config -from transformer import build_transformer as build_model -#from transformer import build_mae_pretrain as build_model - -def count_gelu(layer, inputs, output): - activation_flops = 8 - x = inputs[0] - num = x.numel() - layer.total_ops += num * activation_flops - - -def count_softmax(layer, inputs, output): - softmax_flops = 5 # max/substract, exp, sum, divide - x = inputs[0] - num = x.numel() - layer.total_ops += num * softmax_flops - - -def count_layernorm(layer, inputs, output): - layer_norm_flops = 5 # get mean (sum), get variance (square and sum), scale(multiply) - x = inputs[0] - num = x.numel() - layer.total_ops += num * layer_norm_flops - - -cfg = './configs/vit_large_patch16_224_finetune.yaml' -input_size = (1, 3, 224, 224) -#input_size = (1, 3, 384, 384) -config = get_config(cfg) -model = build_model(config) - -custom_ops = {paddle.nn.GELU: count_gelu, - paddle.nn.LayerNorm: count_layernorm, - paddle.nn.Softmax: count_softmax, - } -print(os.path.basename(cfg)) -paddle.flops(model, - input_size=input_size, - custom_ops=custom_ops, - print_detail=False) - - -#for cfg in glob.glob('./configs/*.yaml'): -# #cfg = './configs/swin_base_patch4_window7_224.yaml' -# input_size = (1, 3, int(cfg[-8:-5]), int(cfg[-8:-5])) -# config = get_config(cfg) -# model = build_model(config) -# -# -# custom_ops = {paddle.nn.GELU: count_gelu, -# paddle.nn.LayerNorm: count_layernorm, -# paddle.nn.Softmax: 
count_softmax, -# } -# print(os.path.basename(cfg)) -# paddle.flops(model, -# input_size=input_size, -# custom_ops=custom_ops, -# print_detail=False) -# print('-----------') diff --git a/image_classification/MAE/transformer.py b/image_classification/MAE/transformer.py index f1fadc87..c6481e5d 100644 --- a/image_classification/MAE/transformer.py +++ b/image_classification/MAE/transformer.py @@ -24,6 +24,7 @@ import paddle.nn.functional as F from droppath import DropPath from config import get_config +from pos_embed import get_2d_sincos_pos_embed class Identity(nn.Layer): @@ -39,7 +40,9 @@ def forward(self, x): def get_position_encoding(seq_len, embed_dim): - """ sinusoid position encoding table""" + """ sinusoid position encoding table + Note: not used in MAE, use get_2d_sincos_pos_embed instead + """ def get_position_angle_vec(embed_dim, position): return [position / np.power(10000, 2 * (hid_j // 2) / embed_dim) for hid_j in range(embed_dim)] @@ -131,7 +134,6 @@ def __init__(self, def _init_weights(self): weight_attr = paddle.ParamAttr( - #initializer=nn.initializer.TruncatedNormal(std=.02)) initializer=nn.initializer.XavierUniform()) # MAE bias_attr = paddle.ParamAttr( initializer=nn.initializer.Constant(0.0)) @@ -196,7 +198,6 @@ def __init__(self, def _init_weights(self): weight_attr = paddle.ParamAttr( - #initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) initializer=paddle.nn.initializer.XavierUniform()) # MAE bias_attr = paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0)) @@ -413,7 +414,7 @@ def __init__(self, encoder_depth=12, decoder_depth=8, encoder_num_heads=12, - decoder_num_heads=8, + decoder_num_heads=16, mlp_ratio=4, qkv_bias=True, dropout=0., @@ -435,15 +436,18 @@ def __init__(self, shape=[1, 1, encoder_embed_dim], dtype='float32', default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) #MAE - + + pos_embed = get_2d_sincos_pos_embed(embed_dim=encoder_embed_dim, + grid_size= int(self.num_patches ** 0.5), + cls_token=True) self.encoder_position_embedding = paddle.create_parameter( shape=[1, 1 + self.num_patches, encoder_embed_dim], dtype='float32', default_initializer=paddle.nn.initializer.Assign( - get_position_encoding(seq_len=1 + self.num_patches, - embed_dim=encoder_embed_dim) + paddle.to_tensor(pos_embed, dtype='float32').unsqueeze(0) ) ) + self.encoder_position_embedding.stop_gradient = True self.encoder = Encoder( encoder_embed_dim, @@ -469,14 +473,17 @@ def __init__(self, dtype='float32', default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) #MAE + pos_embed = get_2d_sincos_pos_embed(embed_dim=decoder_embed_dim, + grid_size= int(self.num_patches ** 0.5), + cls_token=True) self.decoder_position_embedding = paddle.create_parameter( shape=[1, 1 + self.num_patches, decoder_embed_dim], dtype='float32', default_initializer=paddle.nn.initializer.Assign( - get_position_encoding(seq_len=1 + self.num_patches, - embed_dim=decoder_embed_dim) + paddle.to_tensor(pos_embed, dtype='float32').unsqueeze(0) ) ) + self.decoder_position_embedding.stop_gradient = True self.decoder = Decoder( decoder_embed_dim, @@ -500,7 +507,6 @@ def __init__(self, def _init_weights(self): weight_attr = paddle.ParamAttr( - #initializer=nn.initializer.TruncatedNormal(std=.02)) initializer=nn.initializer.XavierUniform()) # MAE bias_attr = paddle.ParamAttr( initializer=nn.initializer.Constant(0.0)) @@ -653,7 +659,7 @@ def __init__(self, embed_dim, dropout) # create positional embedding - self.position_embedding = paddle.create_parameter( + 
self.encoder_position_embedding = paddle.create_parameter( shape=[1, 1 + self.patch_embedding.n_patches, embed_dim], dtype='float32', default_initializer=paddle.nn.initializer.Assign( @@ -675,16 +681,17 @@ def __init__(self, dropout, attention_dropout, droppath, - has_norm=False) + has_norm=not global_pool) # define encoder norm here to aviod cls_token (when global_pool is True) - w_attr, b_attr = self._init_weights_norm() - self.encoder_norm = nn.LayerNorm(embed_dim, - weight_attr=w_attr, - bias_attr=b_attr, - epsilon=1e-6) + if global_pool: + w_attr, b_attr = self._init_weights_norm() + self.encoder_norm = nn.LayerNorm(embed_dim, + weight_attr=w_attr, + bias_attr=b_attr, + epsilon=1e-6) # classifier head (for finetuning) - w_attr_1, b_attr_1 = self._init_weights_linear() + w_attr_1, b_attr_1 = self._init_weights_classifier() self.classifier = nn.Linear(embed_dim, num_classes, weight_attr=w_attr_1, @@ -695,14 +702,14 @@ def forward_features(self, x): x = self.patch_embedding(x) cls_tokens = self.cls_token.expand((x.shape[0], -1, -1)) x = paddle.concat((cls_tokens, x), axis=1) - x = x + self.position_embedding + x = x + self.encoder_position_embedding x = self.encoder(x) if self.global_pool: - x = x[:, 1:, :].mean(axis=1) # global poll w/o cls_token + x = x[:, 1:, :].mean(axis=1) # global pool w/o cls_token out = self.encoder_norm(x) else: - x = self.encoder_norm(x) + # norm is applied in encoder out = x[:, 0] # return cls_token only return out @@ -719,11 +726,16 @@ def _init_weights_norm(self): return weight_attr, bias_attr def _init_weights_linear(self): - #weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=.02)) weight_attr = paddle.ParamAttr(initializer=nn.initializer.XavierUniform()) # MAE bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0)) return weight_attr, bias_attr + def _init_weights_classifier(self): + #weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=2e-5)) + weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0)) + bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0)) + return weight_attr, bias_attr + def build_mae_pretrain(config): """ build MAE vit model for pretraining""" @@ -750,6 +762,7 @@ def build_transformer(config): model = MAETransformer(image_size=config.DATA.IMAGE_SIZE, patch_size=config.MODEL.TRANS.PATCH_SIZE, in_channels=3, + num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.TRANS.ENCODER.EMBED_DIM, depth=config.MODEL.TRANS.ENCODER.DEPTH, num_heads=config.MODEL.TRANS.ENCODER.NUM_HEADS, diff --git a/image_classification/MAE/utils.py b/image_classification/MAE/utils.py index eae144dc..10946200 100644 --- a/image_classification/MAE/utils.py +++ b/image_classification/MAE/utils.py @@ -24,7 +24,7 @@ import paddle from paddle.optimizer.lr import LRScheduler -def get_params_groups(model): +def get_params_groups(model, weight_decay=0.01): regularized = [] not_regularized = [] for name, param in model.named_parameters(): @@ -35,7 +35,7 @@ def get_params_groups(model): not_regularized.append(param) else: regularized.append(param) - return [{'params': regularized}, {'params': not_regularized, 'weight_decay': 0.}] + return [{'params': regularized, 'weight_decay': weight_decay}, {'params': not_regularized, 'weight_decay': 0.}] def cosine_scheduler(base_value, @@ -57,12 +57,27 @@ def cosine_scheduler(base_value, return schedule -def interpolate_pos_embed(model, state_dict): - if 'position_embedding' in state_dict: - pos_embed_w = 
state_dict['position_embedding'] +def adjust_learning_rate(optimizer, + base_lr, + min_lr, + cur_epoch, + warmup_epochs, + total_epochs): + if cur_epoch < warmup_epochs: + lr = base_lr * cur_epoch / warmup_epochs + else: + lr = min_lr + (base_lr - min_lr) * 0.5 * ( + 1. + math.cos(math.pi * (cur_epoch - warmup_epochs) / (total_epochs - warmup_epochs))) + optimizer.set_lr(lr) + return lr + + +def interpolate_pos_embed(model, state_dict, key_name='encoder_position_embedding'): + if key_name in state_dict: + pos_embed_w = state_dict[key_name] embed_dim = pos_embed_w.shape[-1] n_patches = model.patch_embedding.n_patches - n_extra_tokens = model.position_embedding.shape[-2] - n_patches # seq_l - n_patches + n_extra_tokens = getattr(model, key_name).shape[-2] - n_patches orig_size = int((pos_embed_w.shape[-2] - n_extra_tokens) ** 0.5) new_size = int(n_patches ** 0.5) if orig_size != new_size: @@ -75,7 +90,7 @@ def interpolate_pos_embed(model, state_dict): pos_tokens = pos_tokens.transpose([0, 2, 3, 1]) pos_tokens = pos_tokens.flatten(1, 2) new_pos_embed = paddle.concat([extra_tokens, pos_tokens], axis=1) - state_dict['position_embedding'] = new_pos_embed + state_dict[key_name] = new_pos_embed #TODO: check correctness From ef354348fa4c15e97fb2d20ceb2ffc71a321d617 Mon Sep 17 00:00:00 2001 From: xperzy Date: Fri, 4 Mar 2022 11:02:05 +0800 Subject: [PATCH 09/12] fix bugs --- .../vit_base_patch16_224_pretrain_dec1.yaml | 33 ++ .../MAE/load_pytorch_weights.py | 95 +++- .../MAE/load_pytorch_weights_finetune.py | 14 +- .../MAE/main_multi_gpu_finetune.py | 12 +- .../MAE/main_multi_gpu_pretrain.py | 422 ++++++++++++++++++ .../MAE/run_pretrain_vit_b.sh | 8 + .../MAE/run_pretrain_vit_b_debug.sh | 8 + image_classification/MAE/transformer.py | 24 +- 8 files changed, 579 insertions(+), 37 deletions(-) create mode 100644 image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml create mode 100644 image_classification/MAE/main_multi_gpu_pretrain.py create mode 100644 image_classification/MAE/run_pretrain_vit_b.sh create mode 100644 image_classification/MAE/run_pretrain_vit_b_debug.sh diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml b/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml new file mode 100644 index 00000000..fb50c6bd --- /dev/null +++ b/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml @@ -0,0 +1,33 @@ +DATA: + IMAGE_SIZE: 224 + CROP_PCT: 0.875 +MODEL: + TYPE: PRETRAIN + NAME: vit_base_patch16_224 + DROPPATH: 0.0 + TRANS: + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + MASK_RATIO: 0.75 + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 + DECODER: + EMBED_DIM: 512 + DEPTH: 1 + NUM_HEADS: 16 + NORM_PIX_LOSS: True +TRAIN: + NUM_EPOCHS: 800 + WARMUP_EPOCHS: 40 + WEIGHT_DECAY: 0.05 + BASE_LR: 1.5e-4 + END_LR: 0.0 + LINEAR_SCALED_LR: 256 + GRAD_CLIP: None + ACCUM_ITER: 1 + OPTIMIZER: + NAME: 'AdamW' + BETAS: (0.9, 0.95) diff --git a/image_classification/MAE/load_pytorch_weights.py b/image_classification/MAE/load_pytorch_weights.py index 04d47874..18e0fd86 100644 --- a/image_classification/MAE/load_pytorch_weights.py +++ b/image_classification/MAE/load_pytorch_weights.py @@ -29,14 +29,30 @@ np.random.seed(seed) random.seed(seed) -#model_name = 'mae_vit_huge_patch14' -#config = get_config(f'./configs/vit_huge_patch14_224_pretrain.yaml') -#model_name = 'mae_vit_large_patch16' -#config = get_config(f'./configs/vit_large_patch16_224_pretrain.yaml') +#model_type = 'base' +#model_type = 'large' +model_type = 
'huge' + +if model_type == 'base': + model_name = 'mae_vit_base_patch16' + config = get_config(f'./configs/vit_base_patch16_224_pretrain.yaml') + pth_model_path = './mae_pretrain_vit_base.pth' + pd_model_path = './mae_pretrain_vit_base.pdparams' + npatches = 196 +elif model_type == 'large': + model_name = 'mae_vit_large_patch16' + config = get_config(f'./configs/vit_large_patch16_224_pretrain.yaml') + pth_model_path = './mae_pretrain_vit_large.pth' + pd_model_path = './mae_pretrain_vit_large.pdparams' + npatches = 196 +elif model_type == 'huge': + model_name = 'mae_vit_huge_patch14' + config = get_config(f'./configs/vit_huge_patch14_224_pretrain.yaml') + pth_model_path = './mae_pretrain_vit_huge.pth' + pd_model_path = './mae_pretrain_vit_huge.pdparams' + npatches = 256 -model_name = 'mae_vit_base_patch16' -config = get_config(f'./configs/vit_base_patch16_224_pretrain.yaml') def print_model_named_params(model): print('----------------------------------') @@ -161,7 +177,7 @@ def _set_value(th_name, pd_name, transpose=True): def main(): - + paddle.set_device('cpu') paddle_model = build_model(config) paddle_model.eval() @@ -174,7 +190,7 @@ def main(): torch_model = models_mae.__dict__[model_name](norm_pix_loss=True) print_model_named_params(torch_model) print_model_named_buffers(torch_model) - state_dict = torch.load('./mae_pretrain_vit_base.pth', map_location='cpu')['model'] + state_dict = torch.load(pth_model_path, map_location='cpu')['model'] print('===========================') for key in state_dict: print(key) @@ -187,14 +203,18 @@ def main(): paddle_model = convert(torch_model, paddle_model) # check correctness - x = np.random.randn(2, 3, 224, 224).astype('float32') + x = np.random.randn(4, 3, 224, 224).astype('float32') x_paddle = paddle.to_tensor(x) x_torch = torch.Tensor(x).to(device) + + # manually set the same rand probs(noise) for random masking + rp = np.random.rand(4, npatches) + rand_probs = paddle.to_tensor(rp) + noise = torch.Tensor(rp) - #out_torch = torch_model(x_torch)[1] - #out_paddle = paddle_model(x_paddle)[1] - out_torch = torch_model.forward_encoder(x_torch, 0.0)[0] - out_paddle = paddle_model.forward_encoder(x_paddle, 0.0)[0] + # encoder out + out_torch = torch_model.forward_encoder(x_torch, 0.75, noise)[0] + out_paddle = paddle_model.forward_encoder(x_paddle, 0.75, rand_probs)[0] out_torch = out_torch.data.cpu().numpy() out_paddle = out_paddle.cpu().numpy() @@ -205,10 +225,53 @@ def main(): print(out_paddle[0, 0:100]) assert np.allclose(out_torch, out_paddle, atol = 1e-5) + + # encoder out: mask + out_torch = torch_model.forward_encoder(x_torch, 0.75, noise)[1] + out_paddle = paddle_model.forward_encoder(x_paddle, 0.75, rand_probs)[1] + + out_torch = out_torch.data.cpu().numpy() + out_paddle = out_paddle.cpu().numpy() + + print(out_torch.shape, out_paddle.shape) + print(out_torch[0, 0:100]) + print('========================================================') + print(out_paddle[0, 0:100]) + assert np.allclose(out_torch, out_paddle, atol = 1e-5) + + + + # manually set the same rand probs(noise) for random masking + rp = np.random.rand(4, npatches) + rand_probs = paddle.to_tensor(rp) + noise = torch.Tensor(rp) + # [0]: loss, [1]: decoder_out + out_torch = torch_model(x_torch, 0.75, noise)[0] + out_paddle = paddle_model(x_paddle, 0.75, rand_probs)[0] + + out_torch = out_torch.data.cpu().numpy() + out_paddle = out_paddle.cpu().numpy() + + print('torch loss = ', out_torch) + print('paddle loss = ', out_paddle) + + print(out_torch.shape, out_paddle.shape) + 
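# Note: because both models above were fed the same input batch and the same
# masking noise, the reconstruction losses printed here should agree to float32
# tolerance (the atol=1e-5 assert below); a mismatch at this point indicates the
# weight conversion or the shuffle/unshuffle indexing differs between the torch
# and paddle implementations.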
#print(out_torch[0, 0:100]) + #print('========================================================') + #print(out_paddle[0, 0:100]) + #print('--------------------------------------------------------') + #print(out_torch[1, 0:100]) + #print('========================================================') + #print(out_paddle[1, 0:100]) + assert np.allclose(out_torch, out_paddle, atol = 1e-5) + #assert np.allclose(out_torch[0, :, :], out_paddle[0, :, :], atol = 1e-5) + #assert np.allclose(out_torch[1, :, :], out_paddle[1, :, :], atol = 1e-5) + + ## save weights for paddle model - #model_path = os.path.join(f'./{model_name}.pdparams') - #paddle.save(paddle_model.state_dict(), model_path) - #print('all done') + model_path = os.path.join(f'./{pd_model_path}') + paddle.save(paddle_model.state_dict(), model_path) + print('all done') if __name__ == "__main__": diff --git a/image_classification/MAE/load_pytorch_weights_finetune.py b/image_classification/MAE/load_pytorch_weights_finetune.py index e1acab62..78ac7ceb 100644 --- a/image_classification/MAE/load_pytorch_weights_finetune.py +++ b/image_classification/MAE/load_pytorch_weights_finetune.py @@ -24,17 +24,17 @@ ## vit-base #model_path='./mae_finetuned_vit_base' #model_name = 'vit_base_patch16' -#config = get_config(f'./configs/vit_base_patch16_224_finetune.yaml') +config = get_config(f'./configs/vit_base_patch16_224_finetune.yaml') # vit-large -#model_path='./mae_finetuned_vit_large' -#model_name = 'vit_large_patch16' -#config = get_config(f'./configs/vit_large_patch16_224_finetune.yaml') +model_path='./mae_finetuned_vit_large' +model_name = 'vit_large_patch16' +config = get_config(f'./configs/vit_large_patch16_224_finetune.yaml') # vit-huge -model_path='./mae_finetuned_vit_huge' -model_name = 'vit_huge_patch14' -config = get_config(f'./configs/vit_huge_patch14_224_finetune.yaml') +#model_path='./mae_finetuned_vit_huge' +#model_name = 'vit_huge_patch14' +#config = get_config(f'./configs/vit_huge_patch14_224_finetune.yaml') def print_model_named_params(model): diff --git a/image_classification/MAE/main_multi_gpu_finetune.py b/image_classification/MAE/main_multi_gpu_finetune.py index e2037b43..a1cc90fc 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/image_classification/MAE/main_multi_gpu_finetune.py @@ -356,7 +356,7 @@ def validate(dataloader, def main_worker(*args): # STEP 0: Preparation - dist.init_parallel_env() + #dist.init_parallel_env() world_size = dist.get_world_size() local_rank = dist.get_rank() config = args[0] @@ -526,13 +526,13 @@ def main_worker(*args): local_message = (f"----- Validation: " + f"Validation Loss: {val_loss:.4f}, " + f"Validation Acc@1: {val_acc1:.4f}, " + - f"Validation Acc@1: {val_acc5:.4f}, " + + f"Validation Acc@5: {val_acc5:.4f}, " + f"time: {val_time:.2f}") master_message = (f"----- Validation: " + f"Validation Loss: {avg_loss:.4f}, " + f"Validation Acc@1: {avg_acc1:.4f}, " + - f"Validation Acc@1: {avg_acc5:.4f}, " + + f"Validation Acc@5: {avg_acc5:.4f}, " + f"time: {val_time:.2f}") write_log(local_logger, master_logger, local_message, master_message) return @@ -590,13 +590,13 @@ def main_worker(*args): local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + f"Validation Loss: {val_loss:.4f}, " + f"Validation Acc@1: {val_acc1:.4f}, " + - f"Validation Acc@1: {val_acc5:.4f}, " + + f"Validation Acc@5: {val_acc5:.4f}, " + f"time: {val_time:.2f}") master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + f"Validation Loss: {avg_loss:.4f}, " + f"Validation 
Acc@1: {avg_acc1:.4f}, " + - f"Validation Acc@1: {avg_acc5:.4f}, " + + f"Validation Acc@5: {avg_acc5:.4f}, " + f"time: {val_time:.2f}") write_log(local_logger, master_logger, local_message, master_message) @@ -604,7 +604,7 @@ def main_worker(*args): if local_rank == 0: if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: model_path = os.path.join( - config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}") + config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{avg_loss}") paddle.save(model.state_dict(), model_path + '.pdparams') paddle.save(optimizer.state_dict(), model_path + '.pdopt') message = (f"----- Save model: {model_path}.pdparams \n" + diff --git a/image_classification/MAE/main_multi_gpu_pretrain.py b/image_classification/MAE/main_multi_gpu_pretrain.py new file mode 100644 index 00000000..b4757fa2 --- /dev/null +++ b/image_classification/MAE/main_multi_gpu_pretrain.py @@ -0,0 +1,422 @@ +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MAE finetuning using multiple GPU """ + +import sys +import os +import time +import logging +import argparse +import random +import math +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import paddle.distributed as dist +from paddle.distributed import fleet +from datasets import get_dataloader +from datasets import get_dataset +from transformer import build_mae_pretrain as build_model +from utils import AverageMeter +from utils import get_exclude_from_weight_decay_fn +from utils import get_params_groups +from utils import adjust_learning_rate +from config import get_config +from config import update_config +import paddlenlp + + +def get_arguments(): + """return argumeents, this will overwrite the config after loading yaml file""" + parser = argparse.ArgumentParser('MAE') + parser.add_argument('-cfg', type=str, default=None) + parser.add_argument('-dataset', type=str, default=None) + parser.add_argument('-batch_size', type=int, default=None) + parser.add_argument('-image_size', type=int, default=None) + parser.add_argument('-data_path', type=str, default=None) + parser.add_argument('-output', type=str, default=None) + parser.add_argument('-ngpus', type=int, default=None) + parser.add_argument('-pretrained', type=str, default=None) + parser.add_argument('-resume', type=str, default=None) + parser.add_argument('-last_epoch', type=int, default=None) + parser.add_argument('-eval', action='store_true') + parser.add_argument('-amp', action='store_true') + arguments = parser.parse_args() + return arguments + + +def get_logger(file_path): + """set logging file and format + Args: + filename: str, full path of the logger file to write + logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' + Return: + logger: python logger + """ + local_rank = dist.get_rank() + filename = os.path.join(file_path, f'log_all.txt') + log_format = "%(asctime)s %(message)s" + logging.basicConfig(filename=filename, level=logging.INFO, 
+ format=log_format, datefmt="%m%d %I:%M:%S %p") + + # local_logger for each process/GPU + local_logger = logging.getLogger(f'local_{local_rank}') + filename = os.path.join(file_path, f'log_{local_rank}.txt') + fh = logging.FileHandler(filename) + fh.setFormatter(logging.Formatter(log_format)) + local_logger.addHandler(fh) + ## console + #sh = logging.StreamHandler(sys.stdout) + #sh.setFormatter(logging.Formatter(log_format)) + #local_logger.addHandler(sh) + + # master_logger records avg performance + if local_rank == 0: + master_logger = logging.getLogger('master') + # log.txt + filename = os.path.join(file_path, f'log.txt') + fh = logging.FileHandler(filename) + fh.setFormatter(logging.Formatter(log_format)) + master_logger.addHandler(fh) + # console + sh = logging.StreamHandler(sys.stdout) + sh.setFormatter(logging.Formatter(log_format)) + master_logger.addHandler(sh) + else: + master_logger = None + return local_logger, master_logger + + +def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): + if local_logger: + if level == 'info': + local_logger.info(msg_local) + elif level == 'fatal': + local_logger.fatal(msg_local) + else: + raise ValueError("level must in ['info', 'fatal']") + if master_logger and dist.get_rank() == 0: + if msg_master is None: + msg_master = msg_local + + if level == 'info': + master_logger.info("MASTER_LOG " + msg_master) + elif level == 'fatal': + master_logger.fatal("MASTER_LOG " + msg_master) + else: + raise ValueError("level must in ['info', 'fatal']") + + +def all_reduce_mean(x): + world_size = dist.get_world_size() + if world_size > 1: + x_reduce = paddle.to_tensor(x) + dist.all_reduce(x_reduce) + x_reduce = x_reduce / world_size + return x_reduce.item() + else: + return x + + +def train(dataloader, + model, + mask_ratio, + optimizer, + base_lr, + min_lr, + epoch, + warmup_epochs, + total_epochs, + total_batch, + debug_steps=100, + accum_iter=1, + amp=False, + local_logger=None, + master_logger=None): + """Training for one epoch + Args: + dataloader: paddle.io.DataLoader, dataloader instance + model: nn.Layer, a ViT model + masek_ratio, float, mask ratio + optimizer: nn.optimizer + base_lr: float, base learning rate + min_lr: float, minimum lr + epoch: int, current epoch + total_epochs: int, total num of epochs + total_batch: int, total num of batches for one epoch + debug_steps: int, num of iters to log info, default: 100 + accum_iter: int, num of iters for accumulating gradients, default: 1 + amp: bool, if True, use mix precision training, default: False + local_logger: logger for local process/gpu, default: None + master_logger: logger for main process, default: None + Returns: + train_loss_meter.avg: float, average loss on current process/gpu + master_loss_meter.avg: float, average loss on all processes/gpus + train_time: float, training time + """ + model.train() + train_loss_meter = AverageMeter() + master_loss_meter = AverageMeter() + + time_st = time.time() + + if amp is True: + scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 + optimizer.clear_grad() + + for batch_id, data in enumerate(dataloader): + # get data + images = data[0] + batch_size = images.shape[0] + # adjust learning rate + if batch_id % accum_iter == 0: + adjust_learning_rate(optimizer, + base_lr, + min_lr, + batch_id / total_batch + epoch - 1, + warmup_epochs, + total_epochs) + # forward + with paddle.amp.auto_cast(amp is True): + loss, _, _ = model(images) + + loss_value = loss.item() + if not math.isfinite(loss_value): + 
print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + loss = loss / accum_iter + + if not amp: # fp32 + loss.backward() + if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): + optimizer.step() + optimizer.clear_grad() + else: + scaled_loss = scaler.scale(loss) + scaled_loss.backward() + if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): + # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 + scaler.step(optimizer) + scaler.update() + optimizer.clear_grad() + + # sync from other gpus for overall loss and acc + master_loss = all_reduce_mean(loss_value) + master_batch_size = all_reduce_mean(batch_size) + master_loss_meter.update(master_loss, master_batch_size) + train_loss_meter.update(loss_value, batch_size) + if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader): + local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + + f"Step[{batch_id:04d}/{total_batch:04d}], " + + f"Lr: {optimizer.get_lr():04f}, " + + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f})") + master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + + f"Step[{batch_id:04d}/{total_batch:04d}], " + + f"Lr: {optimizer.get_lr():04f}, " + + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f})") + write_log(local_logger, master_logger, local_message, master_message) + + train_time = time.time() - time_st + dist.barrier() + return train_loss_meter.avg, master_loss_meter.avg, train_time + + +def main_worker(*args): + # STEP 0: Preparation + #dist.init_parallel_env() + world_size = dist.get_world_size() + local_rank = dist.get_rank() + config = args[0] + last_epoch = config.TRAIN.LAST_EPOCH + seed = config.SEED + local_rank + paddle.seed(seed) + np.random.seed(seed) + random.seed(seed) + # logger for each process/gpu + local_logger, master_logger = get_logger(config.SAVE) + message = f'----- world_size = {world_size}, local_rank = {local_rank}' + write_log(local_logger, master_logger, message) + + # STEP 1: Create model + paddle.device.set_device('gpu') + model = build_model(config) + if dist.get_world_size() > 1: + strategy = fleet.DistributedStrategy() + ## Hybrid Parallel Training + strategy.hybrid_configs = {} + fleet.init(is_collective=True, strategy=strategy) + + # STEP 2: Create train dataloader + dataset_train = args[1] + dataloader_train = get_dataloader(config, dataset_train, 'train', True) + total_batch_train = len(dataloader_train) + message = f'----- Total # of train batch (single gpu): {total_batch_train}' + write_log(local_logger, master_logger, message) + + # STEP 3: Define optimizer and lr_scheduler + # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) + if config.TRAIN.LINEAR_SCALED_LR is not None: + effective_batch_size = config.DATA.BATCH_SIZE * config.TRAIN.ACCUM_ITER * world_size + config.TRAIN.BASE_LR = ( + config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR + ) + write_log(local_logger, master_logger, f'Base lr is scaled to: {config.TRAIN.BASE_LR}') + + + if config.TRAIN.GRAD_CLIP: + clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) + else: + clip = None + + if config.TRAIN.OPTIMIZER.NAME == "AdamW": + #wd_exclude_list = ['encoder_position_embedding', 'cls_token'] + wd_exclude_list = [] + for n, p in model.named_parameters(): + if p.stop_gradient is True: + continue + if len(p.shape) == 1 or n.endswith('.bias'): + wd_exclude_list.append(n) + print('no_decay param names: ', wd_exclude_list) + 
optimizer = paddle.optimizer.AdamW( + parameters=model.parameters(), + learning_rate=config.TRAIN.BASE_LR, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + beta1=config.TRAIN.OPTIMIZER.BETAS[0], + beta2=config.TRAIN.OPTIMIZER.BETAS[1], + weight_decay=config.TRAIN.WEIGHT_DECAY, # set by params_groups, this vaule is not effectitve + apply_decay_param_fun=get_exclude_from_weight_decay_fn(wd_exclude_list), + epsilon=config.TRAIN.OPTIMIZER.EPS, + grad_clip=clip) + elif config.TRAIN.OPTIMIZER.NAME == "AdamWDL": + name_dict = dict() + wd_exclude_list = ['encoder_position_embedding', 'cls_token'] + for n, p in model.named_parameters(): + # name_dict is for AdamWDL argument 'name_dict' + name_dict[p.name] = n + # add no decay param name to weight exclude list, for AramWDL argument 'apply_decay_param_fn' + if p.stop_gradient is True: + continue + if len(p.shape) == 1 or n.endswith('.bias'): + wd_exclude_list.append(n) + print('no_decay param names: ', wd_exclude_list) + + optimizer = paddlenlp.ops.optimizer.AdamWDL( + learning_rate=config.TRAIN.BASE_LR, + weight_decay=config.TRAIN.WEIGHT_DECAY, + layerwise_decay=config.TRAIN.LAYER_DECAY, + n_layers=config.MODEL.TRANS.ENCODER.DEPTH, + set_param_lr_fun=lr_decay.lr_setting, + parameters=model.parameters(), + name_dict=name_dict, + apply_decay_param_fun=get_exclude_from_weight_decay_fn(wd_exclude_list), + beta1=config.TRAIN.OPTIMIZER.BETAS[0], + beta2=config.TRAIN.OPTIMIZER.BETAS[1], + epsilon=config.TRAIN.OPTIMIZER.EPS, + grad_clip=clip) + else: + message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." + write_log(local_logger, master_logger, message, None, 'fatal') + raise NotImplementedError(message) + + # STEP 4: Load pretrained model / load resumt model and optimizer states + if config.MODEL.PRETRAINED: + assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True + model_state = paddle.load(config.MODEL.PRETRAINED + '.pdparams') + model.set_state_dict(model_state) + message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}.pdparams" + write_log(local_logger, master_logger, message) + + if config.MODEL.RESUME: + assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True + assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True + model_state = paddle.load(config.MODEL.RESUME+'.pdparams') + model.set_dict(model_state) + opt_state = paddle.load(config.MODEL.RESUME+'.pdopt') + optimizer.set_state_dict(opt_state) + message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}.pdparams/.pdopts" + write_log(local_logger, master_logger, message) + if config.TRAIN.LAST_EPOCH == -1: + message = f"----- Resume Training: LAST_EPOCH should not be [-1]" + write_log(local_logger, master_logger, message, None, 'fatal') + + if dist.get_world_size() > 1: + model = fleet.distributed_model(model) + + # STEP 5: Start training (train mode) + write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.") + for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): + # train + write_log(local_logger, master_logger, f"Train epoch {epoch}. 
LR={optimizer.get_lr():.6e}") + + train_loss, avg_loss, train_time = train( + dataloader=dataloader_train, + model=model, + mask_ratio=config.MODEL.TRANS.MASK_RATIO, + optimizer=optimizer, + base_lr=config.TRAIN.BASE_LR, + min_lr=config.TRAIN.END_LR, + epoch=epoch, + warmup_epochs=config.TRAIN.WARMUP_EPOCHS, + total_epochs=config.TRAIN.NUM_EPOCHS, + total_batch=total_batch_train, + debug_steps=config.REPORT_FREQ, + accum_iter=config.TRAIN.ACCUM_ITER, + amp=config.AMP, + local_logger=local_logger, + master_logger=master_logger) + + local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Lr: {optimizer.get_lr():.4f}, " + + f"Train Loss: {train_loss:.4f}, " + + f"time: {train_time:.2f}") + + master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Lr: {optimizer.get_lr():.4f}, " + + f"Train Loss: {avg_loss:.4f}, " + + f"time: {train_time:.2f}") + write_log(local_logger, master_logger, local_message, master_message) + + # model save + if local_rank == 0: + if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: + model_path = os.path.join( + config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{avg_loss}") + paddle.save(model.state_dict(), model_path + '.pdparams') + paddle.save(optimizer.state_dict(), model_path + '.pdopt') + message = (f"----- Save model: {model_path}.pdparams \n" + + f"----- Save optim: {model_path}.pdopt") + write_log(local_logger, master_logger, message) + + +def main(): + # config is updated by: (1) config.py, (2) yaml file, (3) arguments + arguments = get_arguments() + config = get_config() + config = update_config(config, arguments) + # set output folder + config.SAVE = '{}/finetuning-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) + if not os.path.exists(config.SAVE): + os.makedirs(config.SAVE, exist_ok=True) + # get dataset + dataset_train = get_dataset(config, mode='train') + # start training + #config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS + #dist.spawn(main_worker, args=(config, dataset_train, ), nprocs=config.NGPUS) + main_worker(config, dataset_train, ) + + +if __name__ == "__main__": + main() diff --git a/image_classification/MAE/run_pretrain_vit_b.sh b/image_classification/MAE/run_pretrain_vit_b.sh new file mode 100644 index 00000000..a053f1fd --- /dev/null +++ b/image_classification/MAE/run_pretrain_vit_b.sh @@ -0,0 +1,8 @@ +#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ +#python main_multi_gpu_pretrain.py \ +GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_pretrain.py \ +-cfg='./configs/vit_base_patch16_224_pretrain.yaml' \ +-dataset='imagenet2012' \ +-batch_size=64 \ +-data_path='/dataset/imagenet' \ +-amp \ diff --git a/image_classification/MAE/run_pretrain_vit_b_debug.sh b/image_classification/MAE/run_pretrain_vit_b_debug.sh new file mode 100644 index 00000000..e78f350a --- /dev/null +++ b/image_classification/MAE/run_pretrain_vit_b_debug.sh @@ -0,0 +1,8 @@ +#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ +#python main_multi_gpu_pretrain.py \ +GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_pretrain.py \ +-cfg='./configs/vit_base_patch16_224_pretrain_dec1.yaml' \ +-dataset='imagenet2012' \ +-batch_size=64 \ +-data_path='/dataset/imagenet' \ +-amp \ diff --git a/image_classification/MAE/transformer.py b/image_classification/MAE/transformer.py index c6481e5d..694e76bd 100644 --- a/image_classification/MAE/transformer.py +++ b/image_classification/MAE/transformer.py @@ -537,7 
+537,7 @@ def unpatchify(self, x): x = x.reshape([images.shape[0], -1, n_patches * self.patch_size, n_patches * self.patch_size]) return x - def random_masking(self, x, mask_ratio): + def random_masking(self, x, mask_ratio, rand_probs=None): """ Shuffle x then mask the last few tokens according to mask ratio. Args: @@ -548,12 +548,13 @@ def random_masking(self, x, mask_ratio): """ batch_size, seq_len, embed_dim = x.shape keep_len = int(seq_len * (1 - mask_ratio)) - rand_probs = paddle.rand([batch_size, seq_len]) + # for debug only + rand_probs = rand_probs if rand_probs is not None else paddle.rand([batch_size, seq_len]) + #rand_probs = paddle.rand([batch_size, seq_len]) shuffle_ids = paddle.argsort(rand_probs, axis=-1) restore_ids = paddle.argsort(shuffle_ids, axis=-1) keep_ids = shuffle_ids[:, :keep_len] - ids = keep_ids + (paddle.arange(batch_size) * seq_len).unsqueeze(-1).expand([batch_size, -1]) x_masked = paddle.gather(x.flatten(0, 1), index=ids.flatten(), axis=0).reshape([batch_size, keep_len, -1]) @@ -564,12 +565,12 @@ def random_masking(self, x, mask_ratio): mask = paddle.gather(mask.flatten(), index=restore_ids_expand.flatten()).reshape([batch_size, seq_len]) return x_masked, mask, restore_ids - def forward_encoder(self, images, mask_ratio): + def forward_encoder(self, images, mask_ratio, rand_probs=None): x = self.patch_embedding(images) # add pos embed w/o cls token x = x + self.encoder_position_embedding[:, 1:, :] # masking - x, mask, ids_restore = self.random_masking(x, mask_ratio) + x, mask, ids_restore = self.random_masking(x, mask_ratio, rand_probs) # append cls token cls_token = self.cls_token + self.encoder_position_embedding[:, :1, :] cls_tokens = cls_token.expand((x.shape[0], -1, -1)) @@ -586,7 +587,14 @@ def forward_decoder(self, x, ids_restore): # x_: [batch, num_patches, decoder_embed_dim] x_ = paddle.concat([x[:, 1:, :], mask_tokens], axis=1) # no cls token x_shape = x_.shape - x_ = paddle.gather(x_.flatten(0, 1), index=ids_restore.flatten()).reshape(x_shape) + batch_size = x_shape[0] + seq_len = x_shape[1] + + ## The following ops assures the paddle gather_nd op has the same behaviour as pytorch gather op. 
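# Illustrative example of the index-offset trick used below (values are made up):
# with batch_size=2 and seq_len=3, ids_restore might be [[2, 0, 1], [1, 2, 0]].
# Adding the per-row offsets paddle.arange(2) * 3 = [0, 3] gives [[2, 0, 1], [4, 5, 3]],
# which are valid row indices into x_.flatten(0, 1) of shape [batch*seq, dim].
# Gathering those flattened rows therefore reproduces torch.gather along dim=1
# before the result is reshaped back to [batch, seq, dim].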
+ ids_restore_expand = ids_restore + (paddle.arange(batch_size) * seq_len).unsqueeze(-1).expand([batch_size, -1]) + x_ = paddle.gather_nd(x_.flatten(0, 1), index=ids_restore_expand.flatten().unsqueeze(-1)) + x_ = x_.reshape(x_shape) + x = paddle.concat([x[:, :1, :], x_], axis=1) # append cls token x = x + self.decoder_position_embedding @@ -607,8 +615,8 @@ def forward_loss(self, images, pred, mask): loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches return loss - def forward(self, images, mask_ratio=0.75): - encoder_out, mask, restore_ids = self.forward_encoder(images, mask_ratio) + def forward(self, images, mask_ratio=0.75, rand_probs=None): + encoder_out, mask, restore_ids = self.forward_encoder(images, mask_ratio, rand_probs) decoder_out = self.forward_decoder(encoder_out, restore_ids) loss = self.forward_loss(images, decoder_out, mask) return loss, decoder_out, mask From 1486859fdd15e75b0165bc6a4a8c1b1249bb0985 Mon Sep 17 00:00:00 2001 From: xperzy Date: Fri, 4 Mar 2022 14:46:15 +0800 Subject: [PATCH 10/12] update readme --- image_classification/MAE/README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/image_classification/MAE/README.md b/image_classification/MAE/README.md index 769a22cd..216e757e 100644 --- a/image_classification/MAE/README.md +++ b/image_classification/MAE/README.md @@ -30,18 +30,18 @@ pip install paddlenlp ## Models Zoo | Finetuned Model | Acc@1 | Acc@5 | #Params | FLOPs | Image Size | Crop_pct | Interpolation | Link | |-------------------------------|-------|-------|---------|--------|------------|----------|---------------|--------------| -| mae_finetuned_vit_base | 83.72 | 96.54 | 86.4M | 17.0G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1txV3fWnu_Jr17tCCqk9e_pFeuh7GkmvU/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1cqed6Omp8GeNVaa3-W82GA?pwd=i71u)(i71u) | -| mae_finetuned_vit_large | 85.95 | 97.57 | 304.1M | 59.9G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1dzVWxQ0_XTKqKKpA3pSSVU57rT_g8nOe/view?usp=sharing)/[baidu](https://pan.baidu.com/s/17cG1UC3gX4dAXdGDTv_BBw?pwd=v2zk)(v2zk) | -| mae_finetuned_vit_huge | 86.90 | 98.07 | 631.7M | 162.5G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1xqjdPez4uG495w3akVbHbn4YqUB1Nmmk/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1N1t-dsNZpwXSKeVOTkz3IQ?pwd=gs6c)(gs6c) | +| mae_finetuned_vit_base | 83.72 | 96.54 | 86.4M | 17.0G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1txV3fWnu_Jr17tCCqk9e_pFeuh7GkmvU/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1rIV2lYHEIYhD0ScTxmMi5A?pwd=svaw)(svaw) | +| mae_finetuned_vit_large | 85.95 | 97.57 | 304.1M | 59.9G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1dzVWxQ0_XTKqKKpA3pSSVU57rT_g8nOe/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1zlqmA-_fqCNZiuKOPMTtQA?pwd=tp48)(tp48) | +| mae_finetuned_vit_huge | 86.90 | 98.07 | 631.7M | 162.5G | 224 | 0.875 | bicubic | [google](https://drive.google.com/file/d/1xqjdPez4uG495w3akVbHbn4YqUB1Nmmk/view?usp=sharing)/[baidu](https://pan.baidu.com/s/17z-NK-akSlvYJSRZkUU2CQ?pwd=1fds)(1fds) | > *The results are evaluated on ImageNet2012 validation set. 
| Pretrained Model | Link | |-------------------------------|--------------| -| mae_pretrain_vit_base | [google](https://drive.google.com/file/d/1K7ZEaDj1D56i7uTX46hSelf0Ydbpmtie/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1kBn-ad2xyCar4xt-k_oYaA?pwd=rmsi)(rmsi) | -| mae_pretrain_vit_large | [google](https://drive.google.com/file/d/1UagT3mz_cLHcjyIQfyyLOkXtJXda3UbS/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1IcdX_rDdl9vLyI7rD1I8HQ?pwd=r77v)(r77v) | -| mae_pretrain_vit_huge | [google](https://drive.google.com/file/d/1Y1lIO_COL2vkz2YvrmYt2yI8iAiRNiPh/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1Wk_tp8De4AYNFBGnIgl5fg?pwd=mthi)(mthi) | - +| mae_pretrain_vit_base | [google](https://drive.google.com/file/d/1K7ZEaDj1D56i7uTX46hSelf0Ydbpmtie/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1aFdDhA61-5lB9g6LoAlKoQ?pwd=3fu3)(3fu3) | +| mae_pretrain_vit_large | [google](https://drive.google.com/file/d/1UagT3mz_cLHcjyIQfyyLOkXtJXda3UbS/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1UIZuA_3uk5v-AHX41rjd0A?pwd=9c3s)(9c3s) | +| mae_pretrain_vit_huge | [google](https://drive.google.com/file/d/1Y1lIO_COL2vkz2YvrmYt2yI8iAiRNiPh/view?usp=sharing)/[baidu](https://pan.baidu.com/s/1XN-WkiiICqQUXcmv44PUxw?pwd=vc42)(vc42) | +> Note: current model weighs are ported from official repo for paddle, our trainied model weights are coming soon. ## Notebooks We provide a few notebooks in aistudio to help you get started: From 2cc0e3a94660e20838ad181a8ab97bb4ec834c4f Mon Sep 17 00:00:00 2001 From: xperzy Date: Tue, 8 Mar 2022 14:23:33 +0800 Subject: [PATCH 11/12] add linear probe, fix bugs --- image_classification/MAE/config.py | 4 +- ...base_patch16_224_finetune_single_node.yaml | 2 +- ...e_patch16_224_linearprobe_single_node.yaml | 25 + .../vit_base_patch16_224_pretrain_dec1.yaml | 2 + .../MAE/main_multi_gpu_finetune.py | 192 +++-- .../MAE/main_multi_gpu_linearprobe.py | 691 ++++++++++++++++++ .../MAE/main_multi_gpu_pretrain.py | 175 +++-- .../MAE/run_linearprobe_vit_b.sh | 10 + .../MAE/run_linearprobe_vit_b_single_node.sh | 9 + 9 files changed, 960 insertions(+), 150 deletions(-) create mode 100644 image_classification/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml create mode 100644 image_classification/MAE/main_multi_gpu_linearprobe.py create mode 100644 image_classification/MAE/run_linearprobe_vit_b.sh create mode 100644 image_classification/MAE/run_linearprobe_vit_b_single_node.sh diff --git a/image_classification/MAE/config.py b/image_classification/MAE/config.py index b0ad64e2..5ef1372d 100644 --- a/image_classification/MAE/config.py +++ b/image_classification/MAE/config.py @@ -34,7 +34,7 @@ _C.DATA.DATASET = 'imagenet2012' # dataset name, currently only support imagenet2012 _C.DATA.IMAGE_SIZE = 224 # input image size: 224 for pretrain _C.DATA.CROP_PCT = 0.875 # input image scale ratio, scale is applied before centercrop in eval mode -_C.DATA.NUM_WORKERS = 1 # number of data loading threads +_C.DATA.NUM_WORKERS = 2 # number of data loading threads _C.DATA.IMAGENET_MEAN = [0.485, 0.456, 0.406] # [0.5, 0.5, 0.5] # imagenet mean values _C.DATA.IMAGENET_STD = [0.229, 0.224, 0.225] # [0.5, 0.5, 0.5] # imagenet std values @@ -162,6 +162,8 @@ def update_config(config, args): config.DATA.DATA_PATH = args.data_path if args.ngpus: config.NGPUS = args.ngpus + if args.accum_iter: + config.TRAIN.ACCUM_ITER = args.accum_iter if args.eval: config.EVAL = True config.DATA.BATCH_SIZE_EVAL = args.batch_size diff --git 
a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml index ae80de13..af89b84b 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml @@ -42,6 +42,6 @@ TRAIN: RANDOM_ERASE_SPLIT: False VALIDATE_FREQ: 1 -SAVE_FREQ: 10 +SAVE_FREQ: 1 REPORT_FREQ: 20 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml b/image_classification/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml new file mode 100644 index 00000000..35369139 --- /dev/null +++ b/image_classification/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml @@ -0,0 +1,25 @@ +DATA: + IMAGE_SIZE: 224 + CROP_PCT: 0.875 +MODEL: + TYPE: LINEARPROBE + NAME: vit_base_patch16_224 + GLOBAL_POOL: False # enable cls_token + TRANS: + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 +TRAIN: + ACCUM_ITER: 4 + NUM_EPOCHS: 90 + WARMUP_EPOCHS: 10 + WEIGHT_DECAY: 0.0 + BASE_LR: 0.1 + LINEAR_SCALED_LR: 256 + END_LR: 0.0 + OPTIMIZER: + NAME: 'LARS' diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml b/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml index fb50c6bd..ad2b7a63 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml +++ b/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml @@ -31,3 +31,5 @@ TRAIN: OPTIMIZER: NAME: 'AdamW' BETAS: (0.9, 0.95) + +SAVE_FREQ: 1 diff --git a/image_classification/MAE/main_multi_gpu_finetune.py b/image_classification/MAE/main_multi_gpu_finetune.py index a1cc90fc..d887af3d 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/image_classification/MAE/main_multi_gpu_finetune.py @@ -56,6 +56,7 @@ def get_arguments(): parser.add_argument('-data_path', type=str, default=None) parser.add_argument('-output', type=str, default=None) parser.add_argument('-ngpus', type=int, default=None) + parser.add_argument('-accum_iter', type=int, default=None) parser.add_argument('-pretrained', type=str, default=None) parser.add_argument('-resume', type=str, default=None) parser.add_argument('-last_epoch', type=int, default=None) @@ -66,12 +67,14 @@ def get_arguments(): def get_logger(file_path): - """set logging file and format + """Set logging file and format, logs are written in 2 loggers, one local_logger records + the information on its own gpu/process, one master_logger records the overall/average + information over all gpus/processes. 
Args: - filename: str, full path of the logger file to write - logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' + file_path: str, folder path of the logger files to write Return: - logger: python logger + local_logger: python logger for each process + master_logger: python logger for overall processes (on node 0) """ local_rank = dist.get_rank() filename = os.path.join(file_path, f'log_all.txt') @@ -108,26 +111,40 @@ def get_logger(file_path): def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): + """Write messages in loggers + Args: + local_logger: python logger, logs information on single gpu + master_logger: python logger, logs information over all gpus + msg_local: str, message to log on local_logger + msg_master: str, message to log on master_logger, if None, use msg_local, default: None + level: str, log level, in ['info', 'warning', 'fatal'], default: 'info' + """ + # write log to local logger if local_logger: if level == 'info': local_logger.info(msg_local) + elif level == 'warning': + local_logger.warning(msg_local) elif level == 'fatal': local_logger.fatal(msg_local) else: - raise ValueError("level must in ['info', 'fatal']") + raise ValueError("level must in ['info', 'warning', 'fatal']") + # write log to master logger on node 0 if master_logger and dist.get_rank() == 0: if msg_master is None: msg_master = msg_local - if level == 'info': master_logger.info("MASTER_LOG " + msg_master) + elif level == 'warning': + master_logger.warning("MASTER_LOG " + msg_master) elif level == 'fatal': master_logger.fatal("MASTER_LOG " + msg_master) else: - raise ValueError("level must in ['info', 'fatal']") + raise ValueError("level must in ['info', 'warning', 'fatal']") def all_reduce_mean(x): + """perform all_reduce on Tensor""" world_size = dist.get_world_size() if world_size > 1: x_reduce = paddle.to_tensor(x) @@ -147,11 +164,11 @@ def train(dataloader, epoch, warmup_epochs, total_epochs, - total_batch, + total_batches, debug_steps=100, accum_iter=1, mixup_fn=None, - amp=False, + amp_grad_scaler=None, local_logger=None, master_logger=None): """Training for one epoch @@ -162,11 +179,11 @@ def train(dataloader, criterion: nn.XXLoss epoch: int, current epoch total_epochs: int, total num of epochs - total_batch: int, total num of batches for one epoch + total_batches: int, total num of batches for one epoch debug_steps: int, num of iters to log info, default: 100 accum_iter: int, num of iters for accumulating gradients, default: 1 mixup_fn: Mixup, mixup instance, default: None - amp: bool, if True, use mix precision training, default: False + amp_grad_scaler: GradScaler/None, if not None, pass the GradScaler and enable AMP training, default: None local_logger: logger for local process/gpu, default: None master_logger: logger for main process, default: None Returns: @@ -184,8 +201,8 @@ def train(dataloader, time_st = time.time() - if amp is True: - scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 + #if amp is True: + # scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 optimizer.clear_grad() for batch_id, data in enumerate(dataloader): @@ -199,14 +216,14 @@ def train(dataloader, images, label = mixup_fn(images, label_orig) if batch_id % accum_iter == 0: - lr = adjust_learning_rate(optimizer, - base_lr, - min_lr, - batch_id / total_batch + epoch - 1, - warmup_epochs, - total_epochs) + adjust_learning_rate(optimizer, + base_lr, + min_lr, + batch_id / total_batches + epoch - 1, + warmup_epochs, + 
total_epochs) # forward - with paddle.amp.auto_cast(amp is True): + with paddle.amp.auto_cast(amp_grad_scaler is not None): output = model(images) loss = criterion(output, label) @@ -216,19 +233,20 @@ def train(dataloader, sys.exit(1) loss = loss / accum_iter - - if not amp: # fp32 + + # backward and step + if amp_grad_scaler is None: # fp32 loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): optimizer.step() optimizer.clear_grad() - else: - scaled_loss = scaler.scale(loss) + else: # amp + scaled_loss = amp_grad_scaler.scale(loss) scaled_loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 - scaler.step(optimizer) - scaler.update() + amp_grad_scaler.step(optimizer) + amp_grad_scaler.update() optimizer.clear_grad() pred = F.softmax(output) @@ -249,15 +267,14 @@ def train(dataloader, train_acc_meter.update(acc, batch_size) if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader): - local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Lr: {optimizer.get_lr():04f}, " + - f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), " + + general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + f"Step[{batch_id:04d}/{total_batches:04d}], " + f"Lr: {optimizer.get_lr():04f}, ") + local_message = (general_message + + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), " f"Avg Acc: {train_acc_meter.avg:.4f}") - master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Lr: {optimizer.get_lr():04f}, " + - f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), " + + master_message = (general_message + + f"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f}), " f"Avg Acc: {master_acc_meter.avg:.4f}") write_log(local_logger, master_logger, local_message, master_message) @@ -274,7 +291,7 @@ def train(dataloader, def validate(dataloader, model, criterion, - total_batch, + total_batches, debug_steps=100, local_logger=None, master_logger=None): @@ -282,7 +299,7 @@ def validate(dataloader, Args: dataloader: paddle.io.DataLoader, dataloader instance model: nn.Layer, a ViT model - total_batch: int, total num of batches for one epoch + total_batches: int, total num of batches for one epoch debug_steps: int, num of iters to log info, default: 100 local_logger: logger for local process/gpu, default: None master_logger: logger for main process, default: None @@ -333,18 +350,17 @@ def validate(dataloader, val_acc5_meter.update(acc5, batch_size) if batch_id % debug_steps == 0: - local_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " + + local_message = (f"Step[{batch_id:04d}/{total_batches:04d}], " + f"Avg Loss: {val_loss_meter.avg:.4f}, " + f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " + f"Avg Acc@5: {val_acc5_meter.avg:.4f}") - master_message = (f"Step[{batch_id:04d}/{total_batch:04d}], " + + master_message = (f"Step[{batch_id:04d}/{total_batches:04d}], " + f"Avg Loss: {master_loss_meter.avg:.4f}, " + f"Avg Acc@1: {master_acc1_meter.avg:.4f}, " + f"Avg Acc@5: {master_acc5_meter.avg:.4f}") write_log(local_logger, master_logger, local_message, master_message) - - val_time = time.time() - time_st dist.barrier() + val_time = time.time() - time_st return (val_loss_meter.avg, val_acc1_meter.avg, val_acc5_meter.avg, @@ -424,12 +440,17 @@ def main_worker(*args): ) write_log(local_logger, master_logger, f'Base lr is scaled to: 
{config.TRAIN.BASE_LR}') - + # define scaler for amp training + if config.AMP: + amp_grad_scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 + else: + amp_grad_scaler = None + # set gradient clip if config.TRAIN.GRAD_CLIP: clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) else: clip = None - + # set optimizer if config.TRAIN.OPTIMIZER.NAME == "AdamW": params_groups = lr_decay.param_groups_lrd( model=model, @@ -453,7 +474,7 @@ def main_worker(*args): # add no decay param name to weight exclude list, for AramWDL argument 'apply_decay_param_fn' if p.ndim == 1 or n.endswith('.bias'): wd_exclude_list.append(n) - print('no_decay param names: ', wd_exclude_list) + #print('no_decay param names: ', wd_exclude_list) optimizer = paddlenlp.ops.optimizer.AdamWDL( learning_rate=config.TRAIN.BASE_LR, @@ -475,9 +496,11 @@ def main_worker(*args): # STEP 6: Load pretrained model / load resumt model and optimizer states if config.MODEL.PRETRAINED: - assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True - model_state = paddle.load(config.MODEL.PRETRAINED + '.pdparams') - + assert os.path.isfile(config.MODEL.PRETRAINED) is True + model_state = paddle.load(config.MODEL.PRETRAINED) + if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch + # pretrain only load model weight, opt and epoch are ignored + model_state = model_state['model'] if not config.EVAL: keys = ['encoder.norm.weight', 'encoder.norm.bias', 'classfier.weight', 'classifier.bias'] @@ -495,18 +518,29 @@ def main_worker(*args): interpolate_pos_embed(model, model_state) model.set_state_dict(model_state) - message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}.pdparams" + message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" write_log(local_logger, master_logger, message) if config.MODEL.RESUME: - assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True - assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True - model_state = paddle.load(config.MODEL.RESUME+'.pdparams') - model.set_dict(model_state) - opt_state = paddle.load(config.MODEL.RESUME+'.pdopt') - optimizer.set_state_dict(opt_state) - message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}.pdparams/.pdopts" - write_log(local_logger, master_logger, message) + assert os.path.isfile(config.MODEL.RESUME) is True + model_state = paddle.load(config.MODEL.RESUME) + if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch + model.set_state_dict(model_state['model']) + if 'optimizer' in model_state and 'epoch' in model_state: + optimizer.set_state_dict(model_state['optimizer']) + # last_epoch = 1 means training from epoch 2 (1 + 1) + config.TRAIN.LAST_EPOCH = model_state['epoch'] + 1 + if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None: + amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler']) + message = (f"----- Resume Training: Load model from {config.MODEL.RESUME}, " + f"opt = [{'optimizer' in model_state}], " + f"epoch = [{model_state.get('epoch', -1)}], " + f"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]") + write_log(local_logger, master_logger, message) + else: # direct load pdparams without other items + message = f"----- Resume Training: Load model from {config.MODEL.RESUME}, no opt, epoch, or scaler is set!" 
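# This branch keeps backward compatibility with plain weight files: when the loaded
# file is a bare state_dict (no 'model' key), only the weights are restored, and the
# optimizer state, epoch counter, and AMP GradScaler start from the config defaults.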
+ write_log(local_logger, master_logger, message, 'warning') + model.set_dict(model_state) if dist.get_world_size() > 1: model = fleet.distributed_model(model) @@ -518,7 +552,7 @@ def main_worker(*args): dataloader=dataloader_val, model=model, criterion=criterion_val, - total_batch=total_batch_val, + total_batches=total_batch_val, debug_steps=config.REPORT_FREQ, local_logger=local_logger, master_logger=master_logger) @@ -554,25 +588,25 @@ def main_worker(*args): epoch=epoch, warmup_epochs=config.TRAIN.WARMUP_EPOCHS, total_epochs=config.TRAIN.NUM_EPOCHS, - total_batch=total_batch_train, + total_batches=total_batch_train, debug_steps=config.REPORT_FREQ, accum_iter=config.TRAIN.ACCUM_ITER, mixup_fn=mixup_fn, - amp=config.AMP, + amp_grad_scaler=amp_grad_scaler, local_logger=local_logger, master_logger=master_logger) - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Lr: {optimizer.get_lr():.4f}, " + - f"Train Loss: {train_loss:.4f}, " + - f"Train Acc: {train_acc:.4f}, " + - f"time: {train_time:.2f}") - - master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Lr: {optimizer.get_lr():.4f}, " + - f"Train Loss: {avg_loss:.4f}, " + - f"Train Acc: {avg_acc:.4f}, " + - f"time: {train_time:.2f}") + general_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + f"Lr: {optimizer.get_lr():.4f}, " + f"time: {train_time:.2f}") + + local_message = (general_message + + f"Train Loss: {train_loss:.4f}, " + f"Train Acc: {train_acc:.4f}") + master_message = (general_message + + f"Train Loss: {avg_loss:.4f}, " + f"Train Acc: {avg_acc:.4f}") + write_log(local_logger, master_logger, local_message, master_message) # validation @@ -582,7 +616,7 @@ def main_worker(*args): dataloader=dataloader_val, model=model, criterion=criterion_val, - total_batch=total_batch_val, + total_batches=total_batch_val, debug_steps=config.REPORT_FREQ, local_logger=local_logger, master_logger=master_logger) @@ -604,11 +638,15 @@ def main_worker(*args): if local_rank == 0: if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: model_path = os.path.join( - config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{avg_loss}") - paddle.save(model.state_dict(), model_path + '.pdparams') - paddle.save(optimizer.state_dict(), model_path + '.pdopt') - message = (f"----- Save model: {model_path}.pdparams \n" + - f"----- Save optim: {model_path}.pdopt") + config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{avg_loss}.pdparams") + state_dict = dict() + state_dict['model'] = model.state_dict() + state_dict['optimizer'] = optimizer.state_dict() + state_dict['epoch'] = epoch + if amp_grad_scaler is not None: + state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict() + paddle.save(state_dict, model_path) + message = (f"----- Save model: {model_path}") write_log(local_logger, master_logger, message) @@ -619,9 +657,9 @@ def main(): config = update_config(config, arguments) # set output folder if not config.EVAL: - config.SAVE = '{}/finetuning-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) + config.SAVE = '{}/finetuning-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) else: - config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) + config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) if not os.path.exists(config.SAVE): os.makedirs(config.SAVE, exist_ok=True) # get train dataset if in train mode diff --git a/image_classification/MAE/main_multi_gpu_linearprobe.py 
b/image_classification/MAE/main_multi_gpu_linearprobe.py new file mode 100644 index 00000000..a8a09ff1 --- /dev/null +++ b/image_classification/MAE/main_multi_gpu_linearprobe.py @@ -0,0 +1,691 @@ +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MAE linear probing using multiple GPU """ + +import sys +import os +import time +import logging +import argparse +import random +import math +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import paddle.distributed as dist +from paddle.distributed import fleet +from datasets import get_dataloader +from datasets import get_dataset +from mixup import Mixup +from losses import LabelSmoothingCrossEntropyLoss +from losses import SoftTargetCrossEntropyLoss +from transformer import build_transformer as build_model +from utils import AverageMeter +from utils import WarmupCosineScheduler +from utils import get_exclude_from_weight_decay_fn +from utils import get_params_groups +from utils import cosine_scheduler +from utils import adjust_learning_rate +from utils import interpolate_pos_embed +import lr_decay +from config import get_config +from config import update_config +import paddlenlp + + +def get_arguments(): + """return argumeents, this will overwrite the config after loading yaml file""" + parser = argparse.ArgumentParser('MAE') + parser.add_argument('-cfg', type=str, default=None) + parser.add_argument('-dataset', type=str, default=None) + parser.add_argument('-batch_size', type=int, default=None) + parser.add_argument('-image_size', type=int, default=None) + parser.add_argument('-data_path', type=str, default=None) + parser.add_argument('-output', type=str, default=None) + parser.add_argument('-ngpus', type=int, default=None) + parser.add_argument('-accum_iter', type=int, default=None) + parser.add_argument('-pretrained', type=str, default=None) + parser.add_argument('-resume', type=str, default=None) + parser.add_argument('-last_epoch', type=int, default=None) + parser.add_argument('-eval', action='store_true') + parser.add_argument('-amp', action='store_true') + arguments = parser.parse_args() + return arguments + + +def get_logger(file_path): + """Set logging file and format, logs are written in 2 loggers, one local_logger records + the information on its own gpu/process, one master_logger records the overall/average + information over all gpus/processes. 
+ Args: + file_path: str, folder path of the logger files to write + Return: + local_logger: python logger for each process + master_logger: python logger for overall processes (on node 0) + """ + local_rank = dist.get_rank() + filename = os.path.join(file_path, f'log_all.txt') + log_format = "%(asctime)s %(message)s" + logging.basicConfig(filename=filename, level=logging.INFO, + format=log_format, datefmt="%m%d %I:%M:%S %p") + + # local_logger for each process/GPU + local_logger = logging.getLogger(f'local_{local_rank}') + filename = os.path.join(file_path, f'log_{local_rank}.txt') + fh = logging.FileHandler(filename) + fh.setFormatter(logging.Formatter(log_format)) + local_logger.addHandler(fh) + ## console + #sh = logging.StreamHandler(sys.stdout) + #sh.setFormatter(logging.Formatter(log_format)) + #local_logger.addHandler(sh) + + # master_logger records avg performance + if local_rank == 0: + master_logger = logging.getLogger('master') + # log.txt + filename = os.path.join(file_path, f'log.txt') + fh = logging.FileHandler(filename) + fh.setFormatter(logging.Formatter(log_format)) + master_logger.addHandler(fh) + # console + sh = logging.StreamHandler(sys.stdout) + sh.setFormatter(logging.Formatter(log_format)) + master_logger.addHandler(sh) + else: + master_logger = None + return local_logger, master_logger + + +def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): + """Write messages in loggers + Args: + local_logger: python logger, logs information on single gpu + master_logger: python logger, logs information over all gpus + msg_local: str, message to log on local_logger + msg_master: str, message to log on master_logger, if None, use msg_local, default: None + level: str, log level, in ['info', 'warning', 'fatal'], default: 'info' + """ + # write log to local logger + if local_logger: + if level == 'info': + local_logger.info(msg_local) + elif level == 'warning': + local_logger.warning(msg_local) + elif level == 'fatal': + local_logger.fatal(msg_local) + else: + raise ValueError("level must in ['info', 'warning', 'fatal']") + # write log to master logger on node 0 + if master_logger and dist.get_rank() == 0: + if msg_master is None: + msg_master = msg_local + if level == 'info': + master_logger.info("MASTER_LOG " + msg_master) + elif level == 'warning': + master_logger.warning("MASTER_LOG " + msg_master) + elif level == 'fatal': + master_logger.fatal("MASTER_LOG " + msg_master) + else: + raise ValueError("level must in ['info', 'warning', 'fatal']") + + +def all_reduce_mean(x): + """perform all_reduce on Tensor""" + world_size = dist.get_world_size() + if world_size > 1: + x_reduce = paddle.to_tensor(x) + dist.all_reduce(x_reduce) + x_reduce = x_reduce / world_size + return x_reduce.item() + else: + return x + + +def train(dataloader, + model, + optimizer, + criterion, + base_lr, + min_lr, + epoch, + warmup_epochs, + total_epochs, + total_batches, + debug_steps=100, + accum_iter=1, + mixup_fn=None, + amp_grad_scaler=None, + local_logger=None, + master_logger=None): + """Training for one epoch + Args: + dataloader: paddle.io.DataLoader, dataloader instance + model: nn.Layer, a ViT model + optimizer: nn.optimizer + criterion: nn.XXLoss + epoch: int, current epoch + total_epochs: int, total num of epochs + total_batches: int, total num of batches for one epoch + debug_steps: int, num of iters to log info, default: 100 + accum_iter: int, num of iters for accumulating gradients, default: 1 + mixup_fn: Mixup, mixup instance, default: None + 
amp_grad_scaler: GradScaler/None, if not None, pass the GradScaler and enable AMP training, default: None + local_logger: logger for local process/gpu, default: None + master_logger: logger for main process, default: None + Returns: + train_loss_meter.avg: float, average loss on current process/gpu + train_acc_meter.avg: float, average acc@1 on current process/gpu + master_loss_meter.avg: float, average loss on all processes/gpus + master_acc_meter.avg: float, average acc@1 on all processes/gpus + train_time: float, training time + """ + model.train() + train_loss_meter = AverageMeter() + train_acc_meter = AverageMeter() + master_loss_meter = AverageMeter() + master_acc_meter = AverageMeter() + + time_st = time.time() + + #if amp is True: + # scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 + optimizer.clear_grad() + + for batch_id, data in enumerate(dataloader): + # get data + images = data[0] + label = data[1] + label_orig = label.clone() + batch_size = images.shape[0] + + if mixup_fn is not None: + images, label = mixup_fn(images, label_orig) + + if batch_id % accum_iter == 0: + adjust_learning_rate(optimizer, + base_lr, + min_lr, + batch_id / total_batches + epoch - 1, + warmup_epochs, + total_epochs) + # forward + with paddle.amp.auto_cast(amp_grad_scaler is not None): + output = model(images) + loss = criterion(output, label) + + loss_value = loss.item() + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + loss = loss / accum_iter + + # backward and step + if amp_grad_scaler is None: # fp32 + loss.backward() + if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): + optimizer.step() + optimizer.clear_grad() + else: # amp + scaled_loss = amp_grad_scaler.scale(loss) + scaled_loss.backward() + if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): + # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 + amp_grad_scaler.step(optimizer) + amp_grad_scaler.update() + optimizer.clear_grad() + + pred = F.softmax(output) + if mixup_fn: + acc = paddle.metric.accuracy(pred, label_orig).item() + else: + acc = paddle.metric.accuracy(pred, label_orig.unsqueeze(1)).item() + + # sync from other gpus for overall loss and acc + + master_loss = all_reduce_mean(loss_value) + master_acc = all_reduce_mean(acc) + master_batch_size = all_reduce_mean(batch_size) + + master_loss_meter.update(master_loss, master_batch_size) + master_acc_meter.update(master_acc, master_batch_size) + train_loss_meter.update(loss_value, batch_size) + train_acc_meter.update(acc, batch_size) + + if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader): + general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + f"Step[{batch_id:04d}/{total_batches:04d}], " + f"Lr: {optimizer.get_lr():04f}, ") + local_message = (general_message + + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), " + f"Avg Acc: {train_acc_meter.avg:.4f}") + master_message = (general_message + + f"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f}), " + f"Avg Acc: {master_acc_meter.avg:.4f}") + write_log(local_logger, master_logger, local_message, master_message) + + train_time = time.time() - time_st + dist.barrier() + return (train_loss_meter.avg, + train_acc_meter.avg, + master_loss_meter.avg, + master_acc_meter.avg, + train_time) + + +@paddle.no_grad() +def validate(dataloader, + model, + criterion, + total_batches, + debug_steps=100, + local_logger=None, + master_logger=None): + 
"""Validation for the whole dataset + Args: + dataloader: paddle.io.DataLoader, dataloader instance + model: nn.Layer, a ViT model + total_batches: int, total num of batches for one epoch + debug_steps: int, num of iters to log info, default: 100 + local_logger: logger for local process/gpu, default: None + master_logger: logger for main process, default: None + Returns: + val_loss_meter.avg: float, average loss on current process/gpu + val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus + val_acc5_meter.avg: float, average top5 accuracy on current processes/gpus + master_loss_meter.avg: float, average loss on all processes/gpus + master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus + master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus + val_time: float, validation time + """ + model.eval() + val_loss_meter = AverageMeter() + val_acc1_meter = AverageMeter() + val_acc5_meter = AverageMeter() + master_loss_meter = AverageMeter() + master_acc1_meter = AverageMeter() + master_acc5_meter = AverageMeter() + + time_st = time.time() + + for batch_id, data in enumerate(dataloader): + # get data + images = data[0] + label = data[1] + batch_size = images.shape[0] + + output = model(images) + loss = criterion(output, label) + loss_value = loss.item() + + pred = F.softmax(output) + acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item() + acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item() + + # sync from other gpus for overall loss and acc + master_loss = all_reduce_mean(loss_value) + master_acc1 = all_reduce_mean(acc1) + master_acc5 = all_reduce_mean(acc5) + master_batch_size = all_reduce_mean(batch_size) + + master_loss_meter.update(master_loss, master_batch_size) + master_acc1_meter.update(master_acc1, master_batch_size) + master_acc5_meter.update(master_acc5, master_batch_size) + val_loss_meter.update(loss_value, batch_size) + val_acc1_meter.update(acc1, batch_size) + val_acc5_meter.update(acc5, batch_size) + + if batch_id % debug_steps == 0: + local_message = (f"Step[{batch_id:04d}/{total_batches:04d}], " + + f"Avg Loss: {val_loss_meter.avg:.4f}, " + + f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " + + f"Avg Acc@5: {val_acc5_meter.avg:.4f}") + master_message = (f"Step[{batch_id:04d}/{total_batches:04d}], " + + f"Avg Loss: {master_loss_meter.avg:.4f}, " + + f"Avg Acc@1: {master_acc1_meter.avg:.4f}, " + + f"Avg Acc@5: {master_acc5_meter.avg:.4f}") + write_log(local_logger, master_logger, local_message, master_message) + dist.barrier() + val_time = time.time() - time_st + return (val_loss_meter.avg, + val_acc1_meter.avg, + val_acc5_meter.avg, + master_loss_meter.avg, + master_acc1_meter.avg, + master_acc5_meter.avg, + val_time) + + +def main_worker(*args): + # STEP 0: Preparation + #dist.init_parallel_env() + world_size = dist.get_world_size() + local_rank = dist.get_rank() + config = args[0] + last_epoch = config.TRAIN.LAST_EPOCH + seed = config.SEED + local_rank + paddle.seed(seed) + np.random.seed(seed) + random.seed(seed) + # logger for each process/gpu + local_logger, master_logger = get_logger(config.SAVE) + message = f'----- world_size = {world_size}, local_rank = {local_rank}' + write_log(local_logger, master_logger, message) + + # STEP 1: Create model + paddle.device.set_device('gpu') + model = build_model(config) + if dist.get_world_size() > 1: + strategy = fleet.DistributedStrategy() + # lars + if config.TRAIN.OPTIMIZER.NAME == "LARS": + strategy.lars = True + strategy.lars_configs = { + "lars_coeff": 
0.001, + "lars_weight_decay": config.TRAIN.WEIGHT_DECAY, + "exclude_from_weight_decay": ['classifier.0._mean', 'classifier.0._variance'] + } + + ## Hybrid Parallel Training + strategy.hybrid_configs = {} + fleet.init(is_collective=True, strategy=strategy) + + # STEP 2: Create train and val dataloader + if not config.EVAL: + dataset_train = args[1] + dataloader_train = get_dataloader(config, dataset_train, 'train', True) + total_batch_train = len(dataloader_train) + message = f'----- Total # of train batch (single gpu): {total_batch_train}' + write_log(local_logger, master_logger, message) + + dataset_val = args[2] + dataloader_val = get_dataloader(config, dataset_val, 'val', True) + total_batch_val = len(dataloader_val) + message = f'----- Total # of val batch (single gpu): {total_batch_val}' + write_log(local_logger, master_logger, message) + + # STEP 3: Define criterion + criterion = nn.CrossEntropyLoss() + + # STEP 4: Define optimizer and lr_scheduler + # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) + if not config.EVAL: + if config.TRAIN.LINEAR_SCALED_LR is not None: + effective_batch_size = config.DATA.BATCH_SIZE * config.TRAIN.ACCUM_ITER * world_size + config.TRAIN.BASE_LR = ( + config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR + ) + write_log(local_logger, master_logger, f'Base lr is scaled to: {config.TRAIN.BASE_LR}') + + # define scaler for amp training + if config.AMP: + amp_grad_scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 + else: + amp_grad_scaler = None + # set gradient clip + if config.TRAIN.GRAD_CLIP: + clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) + else: + clip = None + # set optimizer + if config.TRAIN.OPTIMIZER.NAME == "AdamW": + params_groups = lr_decay.param_groups_lrd( + model=model, + no_weight_decay_list=['encoder_position_embedding', 'cls_token'], + weight_decay=config.TRAIN.WEIGHT_DECAY, + layer_decay=config.TRAIN.LAYER_DECAY) + optimizer = paddle.optimizer.AdamW( + parameters=params_groups, + learning_rate=config.TRAIN.BASE_LR, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + beta1=config.TRAIN.OPTIMIZER.BETAS[0], + beta2=config.TRAIN.OPTIMIZER.BETAS[1], + weight_decay=config.TRAIN.WEIGHT_DECAY, # set by params_groups, this vaule is not effectitve + epsilon=config.TRAIN.OPTIMIZER.EPS, + grad_clip=clip) + elif config.TRAIN.OPTIMIZER.NAME == "AdamWDL": + name_dict = dict() + wd_exclude_list = ['encoder_position_embedding', 'cls_token'] + for n, p in model.named_parameters(): + # name_dict is for AdamWDL argument 'name_dict' + name_dict[p.name] = n + # add no decay param name to weight exclude list, for AramWDL argument 'apply_decay_param_fn' + if p.ndim == 1 or n.endswith('.bias'): + wd_exclude_list.append(n) + #print('no_decay param names: ', wd_exclude_list) + + optimizer = paddlenlp.ops.optimizer.AdamWDL( + learning_rate=config.TRAIN.BASE_LR, + weight_decay=config.TRAIN.WEIGHT_DECAY, + layerwise_decay=config.TRAIN.LAYER_DECAY, + n_layers=config.MODEL.TRANS.ENCODER.DEPTH, + set_param_lr_fun=lr_decay.lr_setting, + parameters=model.parameters(), + name_dict=name_dict, + apply_decay_param_fun=get_exclude_from_weight_decay_fn(wd_exclude_list), + beta1=config.TRAIN.OPTIMIZER.BETAS[0], + beta2=config.TRAIN.OPTIMIZER.BETAS[1], + epsilon=config.TRAIN.OPTIMIZER.EPS, + grad_clip=clip) + elif config.TRAIN.OPTIMIZER.NAME == "LARS": + optimizer = paddle.optimizer.Momentum( + learning_rate=config.TRAIN.BASE_LR, + 
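+            # Note: LARS itself is enabled via strategy.lars in the fleet DistributedStrategy above;
+            # this Momentum optimizer only updates the classifier head, which is all that is trained in linear probing.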
parameters=model.classifier.parameters(), + momentum=0.9, + grad_clip=None, + weight_decay=None, # set by fleet lars + ) + else: + message = f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}." + write_log(local_logger, master_logger, message, None, 'fatal') + raise NotImplementedError(message) + + # STEP 5: Load pretrained model / load resumt model and optimizer states + if config.MODEL.PRETRAINED: + assert os.path.isfile(config.MODEL.PRETRAINED) is True + model_state = paddle.load(config.MODEL.PRETRAINED) + if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch + # pretrain only load model weight, opt and epoch are ignored + model_state = model_state['model'] + if not config.EVAL: + keys = ['encoder.norm.weight', 'encoder.norm.bias', + 'classfier.weight', 'classifier.bias'] + if config.MODEL.GLOBAL_POOL: + if keys[0] in model_state: + del model_state[keys[0]] + if keys[1] in model_state: + del model_state[keys[1]] + if keys[2] in model_state: + del model_state[keys[2]] + if keys[3] in model_state: + del model_state[keys[3]] + + # interpolate position embedding + interpolate_pos_embed(model, model_state) + + model.set_state_dict(model_state) + message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" + write_log(local_logger, master_logger, message) + + # for linear prob: add bn1d to classifier layer + model.classifier = nn.Sequential( + nn.BatchNorm1D(model.classifier.weight.shape[0], weight_attr=False, bias_attr=False, epsilon=1e-6), + model.classifier) + # freeze all but the classifier + for _, p in model.named_parameters(): + p.stop_gradient = True + for _, p in model.classifier.named_parameters(): + p.stop_gradient = False + + for n, p in model.named_parameters(): + print(n, p.shape, p.stop_gradient) + + if config.MODEL.RESUME: + assert os.path.isfile(config.MODEL.RESUME) is True + model_state = paddle.load(config.MODEL.RESUME) + if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch + model.set_state_dict(model_state['model']) + if 'optimizer' in model_state and 'epoch' in model_state: + optimizer.set_state_dict(model_state['optimizer']) + # last_epoch = 1 means training from epoch 2 (1 + 1) + config.TRAIN.LAST_EPOCH = model_state['epoch'] + 1 + if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None: + amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler']) + message = (f"----- Resume Training: Load model from {config.MODEL.RESUME}, " + f"opt = [{'optimizer' in model_state}], " + f"epoch = [{model_state.get('epoch', -1)}], " + f"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]") + write_log(local_logger, master_logger, message) + else: # direct load pdparams without other items + message = f"----- Resume Training: Load model from {config.MODEL.RESUME}, no opt, epoch, or scaler is set!" 
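+            # Note: the positional 'warning' below is received as msg_master, not as the log level;
+            # pass level='warning' explicitly to change the logging level in write_log.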
+ write_log(local_logger, master_logger, message, 'warning') + model.set_dict(model_state) + + if dist.get_world_size() > 1: + model = fleet.distributed_model(model) + + # STEP 7: Validation (eval mode) + if config.EVAL: + write_log(local_logger, master_logger, f"----- Start Validation") + val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( + dataloader=dataloader_val, + model=model, + criterion=criterion, + total_batches=total_batch_val, + debug_steps=config.REPORT_FREQ, + local_logger=local_logger, + master_logger=master_logger) + + local_message = (f"----- Validation: " + + f"Validation Loss: {val_loss:.4f}, " + + f"Validation Acc@1: {val_acc1:.4f}, " + + f"Validation Acc@5: {val_acc5:.4f}, " + + f"time: {val_time:.2f}") + + master_message = (f"----- Validation: " + + f"Validation Loss: {avg_loss:.4f}, " + + f"Validation Acc@1: {avg_acc1:.4f}, " + + f"Validation Acc@5: {avg_acc5:.4f}, " + + f"time: {val_time:.2f}") + write_log(local_logger, master_logger, local_message, master_message) + return + + + # STEP 7: Start training (train mode) + write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.") + for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): + # train + write_log(local_logger, master_logger, f"Train epoch {epoch}. LR={optimizer.get_lr():.6e}") + + train_loss, train_acc, avg_loss, avg_acc, train_time = train( + dataloader=dataloader_train, + model=model, + optimizer=optimizer, + criterion=criterion, + base_lr=config.TRAIN.BASE_LR, + min_lr=config.TRAIN.END_LR, + epoch=epoch, + warmup_epochs=config.TRAIN.WARMUP_EPOCHS, + total_epochs=config.TRAIN.NUM_EPOCHS, + total_batches=total_batch_train, + debug_steps=config.REPORT_FREQ, + accum_iter=config.TRAIN.ACCUM_ITER, + mixup_fn=None, + amp_grad_scaler=amp_grad_scaler, + local_logger=local_logger, + master_logger=master_logger) + + general_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + f"Lr: {optimizer.get_lr():.4f}, " + f"time: {train_time:.2f}") + + local_message = (general_message + + f"Train Loss: {train_loss:.4f}, " + f"Train Acc: {train_acc:.4f}") + master_message = (general_message + + f"Train Loss: {avg_loss:.4f}, " + f"Train Acc: {avg_acc:.4f}") + + write_log(local_logger, master_logger, local_message, master_message) + + # validation + if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: + write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}') + val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate( + dataloader=dataloader_val, + model=model, + criterion=criterion, + total_batches=total_batch_val, + debug_steps=config.REPORT_FREQ, + local_logger=local_logger, + master_logger=master_logger) + + local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Validation Loss: {val_loss:.4f}, " + + f"Validation Acc@1: {val_acc1:.4f}, " + + f"Validation Acc@5: {val_acc5:.4f}, " + + f"time: {val_time:.2f}") + + master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + + f"Validation Loss: {avg_loss:.4f}, " + + f"Validation Acc@1: {avg_acc1:.4f}, " + + f"Validation Acc@5: {avg_acc5:.4f}, " + + f"time: {val_time:.2f}") + write_log(local_logger, master_logger, local_message, master_message) + + # model save + if local_rank == 0: + if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: + model_path = os.path.join( + config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{avg_loss}.pdparams") + state_dict = 
dict() + state_dict['model'] = model.state_dict() + state_dict['optimizer'] = optimizer.state_dict() + state_dict['epoch'] = epoch + if amp_grad_scaler is not None: + state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict() + paddle.save(state_dict, model_path) + message = (f"----- Save model: {model_path}") + write_log(local_logger, master_logger, message) + + +def main(): + # config is updated by: (1) config.py, (2) yaml file, (3) arguments + arguments = get_arguments() + config = get_config() + config = update_config(config, arguments) + # set output folder + if not config.EVAL: + config.SAVE = '{}/linearprobing-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) + else: + config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) + if not os.path.exists(config.SAVE): + os.makedirs(config.SAVE, exist_ok=True) + # get train dataset if in train mode + if config.EVAL: + dataset_train = None + else: + dataset_train = get_dataset(config, mode='train') + # get val dataset + dataset_val = get_dataset(config, mode='val') + # start training + #config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS + #dist.spawn(main_worker, args=(config, dataset_train, dataset_val), nprocs=config.NGPUS) + main_worker(config, dataset_train, dataset_val) + + +if __name__ == "__main__": + main() diff --git a/image_classification/MAE/main_multi_gpu_pretrain.py b/image_classification/MAE/main_multi_gpu_pretrain.py index b4757fa2..61580972 100644 --- a/image_classification/MAE/main_multi_gpu_pretrain.py +++ b/image_classification/MAE/main_multi_gpu_pretrain.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""MAE finetuning using multiple GPU """ +"""MAE pretraining using multiple GPU """ import sys import os @@ -49,6 +49,7 @@ def get_arguments(): parser.add_argument('-data_path', type=str, default=None) parser.add_argument('-output', type=str, default=None) parser.add_argument('-ngpus', type=int, default=None) + parser.add_argument('-accum_iter', type=int, default=None) parser.add_argument('-pretrained', type=str, default=None) parser.add_argument('-resume', type=str, default=None) parser.add_argument('-last_epoch', type=int, default=None) @@ -59,12 +60,14 @@ def get_arguments(): def get_logger(file_path): - """set logging file and format + """Set logging file and format, logs are written in 2 loggers, one local_logger records + the information on its own gpu/process, one master_logger records the overall/average + information over all gpus/processes. 
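+        Note: only rank 0 creates a master_logger; every other rank gets master_logger = None.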
Args: - filename: str, full path of the logger file to write - logger_name: str, the logger name, e.g., 'master_logger', 'local_logger' + file_path: str, folder path of the logger files to write Return: - logger: python logger + local_logger: python logger for each process + master_logger: python logger for overall processes (on node 0) """ local_rank = dist.get_rank() filename = os.path.join(file_path, f'log_all.txt') @@ -101,26 +104,40 @@ def get_logger(file_path): def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): + """Write messages in loggers + Args: + local_logger: python logger, logs information on single gpu + master_logger: python logger, logs information over all gpus + msg_local: str, message to log on local_logger + msg_master: str, message to log on master_logger, if None, use msg_local, default: None + level: str, log level, in ['info', 'warning', 'fatal'], default: 'info' + """ + # write log to local logger if local_logger: if level == 'info': local_logger.info(msg_local) + elif level == 'warning': + local_logger.warning(msg_local) elif level == 'fatal': local_logger.fatal(msg_local) else: - raise ValueError("level must in ['info', 'fatal']") + raise ValueError("level must in ['info', 'warning', 'fatal']") + # write log to master logger on node 0 if master_logger and dist.get_rank() == 0: if msg_master is None: msg_master = msg_local - if level == 'info': master_logger.info("MASTER_LOG " + msg_master) + elif level == 'warning': + master_logger.warning("MASTER_LOG " + msg_master) elif level == 'fatal': master_logger.fatal("MASTER_LOG " + msg_master) else: - raise ValueError("level must in ['info', 'fatal']") + raise ValueError("level must in ['info', 'warning', 'fatal']") def all_reduce_mean(x): + """perform all_reduce on Tensor""" world_size = dist.get_world_size() if world_size > 1: x_reduce = paddle.to_tensor(x) @@ -140,10 +157,10 @@ def train(dataloader, epoch, warmup_epochs, total_epochs, - total_batch, + total_batches, debug_steps=100, accum_iter=1, - amp=False, + amp_grad_scaler=None, local_logger=None, master_logger=None): """Training for one epoch @@ -156,10 +173,10 @@ def train(dataloader, min_lr: float, minimum lr epoch: int, current epoch total_epochs: int, total num of epochs - total_batch: int, total num of batches for one epoch + total_batches: int, total num of batches for one epoch debug_steps: int, num of iters to log info, default: 100 accum_iter: int, num of iters for accumulating gradients, default: 1 - amp: bool, if True, use mix precision training, default: False + amp_grad_scaler: GradScaler/None, if not None, pass the GradScaler and enable AMP training, default: None local_logger: logger for local process/gpu, default: None master_logger: logger for main process, default: None Returns: @@ -173,8 +190,8 @@ def train(dataloader, time_st = time.time() - if amp is True: - scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 + #if amp is True: + # scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 optimizer.clear_grad() for batch_id, data in enumerate(dataloader): @@ -186,11 +203,11 @@ def train(dataloader, adjust_learning_rate(optimizer, base_lr, min_lr, - batch_id / total_batch + epoch - 1, + batch_id / total_batches + epoch - 1, warmup_epochs, total_epochs) # forward - with paddle.amp.auto_cast(amp is True): + with paddle.amp.auto_cast(amp_grad_scaler is not None): loss, _, _ = model(images) loss_value = loss.item() @@ -199,19 +216,20 @@ def train(dataloader, sys.exit(1) loss = loss / 
accum_iter - - if not amp: # fp32 + + # backward and step + if amp_grad_scaler is None: # fp32 loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): optimizer.step() optimizer.clear_grad() - else: - scaled_loss = scaler.scale(loss) + else: # amp + scaled_loss = amp_grad_scaler.scale(loss) scaled_loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 - scaler.step(optimizer) - scaler.update() + amp_grad_scaler.step(optimizer) + amp_grad_scaler.update() optimizer.clear_grad() # sync from other gpus for overall loss and acc @@ -220,18 +238,17 @@ def train(dataloader, master_loss_meter.update(master_loss, master_batch_size) train_loss_meter.update(loss_value, batch_size) if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader): - local_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Lr: {optimizer.get_lr():04f}, " + + general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + f"Step[{batch_id:04d}/{total_batches:04d}], " + f"Lr: {optimizer.get_lr():04f}, ") + local_message = (general_message + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f})") - master_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + - f"Step[{batch_id:04d}/{total_batch:04d}], " + - f"Lr: {optimizer.get_lr():04f}, " + - f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f})") + master_message = (general_message + + f"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f})") write_log(local_logger, master_logger, local_message, master_message) - train_time = time.time() - time_st dist.barrier() + train_time = time.time() - time_st return train_loss_meter.avg, master_loss_meter.avg, train_time @@ -264,7 +281,7 @@ def main_worker(*args): dataset_train = args[1] dataloader_train = get_dataloader(config, dataset_train, 'train', True) total_batch_train = len(dataloader_train) - message = f'----- Total # of train batch (single gpu): {total_batch_train}' + message = f'----- Total # of train batch (on single gpu): {total_batch_train}' write_log(local_logger, master_logger, message) # STEP 3: Define optimizer and lr_scheduler @@ -275,13 +292,17 @@ def main_worker(*args): config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR ) write_log(local_logger, master_logger, f'Base lr is scaled to: {config.TRAIN.BASE_LR}') - - + # define scaler for amp training + if config.AMP is True: + amp_grad_scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 + else: + amp_grad_scaler = None + # set gradient clip if config.TRAIN.GRAD_CLIP: clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) else: clip = None - + # set optimizer if config.TRAIN.OPTIMIZER.NAME == "AdamW": #wd_exclude_list = ['encoder_position_embedding', 'cls_token'] wd_exclude_list = [] @@ -290,7 +311,7 @@ def main_worker(*args): continue if len(p.shape) == 1 or n.endswith('.bias'): wd_exclude_list.append(n) - print('no_decay param names: ', wd_exclude_list) + #print('no_decay param names: ', wd_exclude_list) optimizer = paddle.optimizer.AdamW( parameters=model.parameters(), learning_rate=config.TRAIN.BASE_LR, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, @@ -311,8 +332,7 @@ def main_worker(*args): continue if len(p.shape) == 1 or n.endswith('.bias'): wd_exclude_list.append(n) - print('no_decay param names: ', wd_exclude_list) - + #print('no_decay param names: ', wd_exclude_list) optimizer 
= paddlenlp.ops.optimizer.AdamWDL( learning_rate=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY, @@ -333,30 +353,41 @@ def main_worker(*args): # STEP 4: Load pretrained model / load resumt model and optimizer states if config.MODEL.PRETRAINED: - assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True - model_state = paddle.load(config.MODEL.PRETRAINED + '.pdparams') - model.set_state_dict(model_state) - message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}.pdparams" + assert os.path.isfile(config.MODEL.PRETRAINED) is True + model_state = paddle.load(config.MODEL.PRETRAINED) + if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch + # pretrain only load model weight, opt and epoch are ignored + model.set_state_dict(model_state['model']) + else: # direct load pdparams without other items + model.set_state_dict(model_state) + message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" write_log(local_logger, master_logger, message) if config.MODEL.RESUME: - assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True - assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True - model_state = paddle.load(config.MODEL.RESUME+'.pdparams') - model.set_dict(model_state) - opt_state = paddle.load(config.MODEL.RESUME+'.pdopt') - optimizer.set_state_dict(opt_state) - message = f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}.pdparams/.pdopts" - write_log(local_logger, master_logger, message) - if config.TRAIN.LAST_EPOCH == -1: - message = f"----- Resume Training: LAST_EPOCH should not be [-1]" - write_log(local_logger, master_logger, message, None, 'fatal') + assert os.path.isfile(config.MODEL.RESUME) is True + model_state = paddle.load(config.MODEL.RESUME) + if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch + model.set_state_dict(model_state['model']) + if 'optimizer' in model_state and 'epoch' in model_state: + optimizer.set_state_dict(model_state['optimizer']) + config.TRAIN.LAST_EPOCH = model_state['epoch'] + 1 + if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None: + amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler']) + message = (f"----- Resume Training: Load model from {config.MODEL.RESUME}, " + f"opt = [{'optimizer' in model_state}], " + f"epoch = [{model_state.get('epoch', -1)}], " + f"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]") + write_log(local_logger, master_logger, message) + else: # direct load pdparams without other items + message = f"----- Resume Training: Load model from {config.MODEL.RESUME}, no opt, epoch, or scaler is set!" + write_log(local_logger, master_logger, message, 'warning') + model.set_dict(model_state) + # STEP 5: Start training (train mode) if dist.get_world_size() > 1: model = fleet.distributed_model(model) - # STEP 5: Start training (train mode) - write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.") + write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch + 1}.") for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): # train write_log(local_logger, master_logger, f"Train epoch {epoch}. 
LR={optimizer.get_lr():.6e}") @@ -371,33 +402,35 @@ def main_worker(*args): epoch=epoch, warmup_epochs=config.TRAIN.WARMUP_EPOCHS, total_epochs=config.TRAIN.NUM_EPOCHS, - total_batch=total_batch_train, + total_batches=total_batch_train, debug_steps=config.REPORT_FREQ, accum_iter=config.TRAIN.ACCUM_ITER, - amp=config.AMP, + amp_grad_scaler=amp_grad_scaler, local_logger=local_logger, master_logger=master_logger) - local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Lr: {optimizer.get_lr():.4f}, " + - f"Train Loss: {train_loss:.4f}, " + - f"time: {train_time:.2f}") - - master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + - f"Lr: {optimizer.get_lr():.4f}, " + - f"Train Loss: {avg_loss:.4f}, " + - f"time: {train_time:.2f}") + general_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " + f"Lr: {optimizer.get_lr():.4f}, " + f"time: {train_time:.2f}, ") + local_message = (general_message + + f"Train Loss: {train_loss:.4f}") + master_message = (general_message + + f"Train Loss: {avg_loss:.4f}") write_log(local_logger, master_logger, local_message, master_message) # model save if local_rank == 0: if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS: model_path = os.path.join( - config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{avg_loss}") - paddle.save(model.state_dict(), model_path + '.pdparams') - paddle.save(optimizer.state_dict(), model_path + '.pdopt') - message = (f"----- Save model: {model_path}.pdparams \n" + - f"----- Save optim: {model_path}.pdopt") + config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{avg_loss}.pdparams") + state_dict = dict() + state_dict['model'] = model.state_dict() + state_dict['optimizer'] = optimizer.state_dict() + state_dict['epoch'] = epoch + if amp_grad_scaler is not None: + state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict() + paddle.save(state_dict, model_path) + message = (f"----- Save model: {model_path}") write_log(local_logger, master_logger, message) @@ -407,7 +440,7 @@ def main(): config = get_config() config = update_config(config, arguments) # set output folder - config.SAVE = '{}/finetuning-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S')) + config.SAVE = '{}/pretrain-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) if not os.path.exists(config.SAVE): os.makedirs(config.SAVE, exist_ok=True) # get dataset diff --git a/image_classification/MAE/run_linearprobe_vit_b.sh b/image_classification/MAE/run_linearprobe_vit_b.sh new file mode 100644 index 00000000..e954f863 --- /dev/null +++ b/image_classification/MAE/run_linearprobe_vit_b.sh @@ -0,0 +1,10 @@ +#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ +#python main_multi_gpu_linearprobe.py \ +GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_linearprobe.py \ +-cfg='./configs/vit_base_patch16_224_linearprobe.yaml' \ +-dataset='imagenet2012' \ +-batch_size=512 \ +-accum_iter=2 \ # orriginal effective batch_size = 512bs * 4nodes * 8gpus. 
So for 2 node, accum_iter should be 2 +-data_path='/dataset/imagenet' \ +-pretrained='./mae_pretrain_vit_base.pdparams' \ +-amp \ diff --git a/image_classification/MAE/run_linearprobe_vit_b_single_node.sh b/image_classification/MAE/run_linearprobe_vit_b_single_node.sh new file mode 100644 index 00000000..6b07f906 --- /dev/null +++ b/image_classification/MAE/run_linearprobe_vit_b_single_node.sh @@ -0,0 +1,9 @@ +#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ +#python main_multi_gpu_linearprobe.py \ +GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_linearprobe.py \ +-cfg='./configs/vit_base_patch16_224_linearprobe_single_node.yaml' \ +-dataset='imagenet2012' \ +-batch_size=512 \ +-data_path='/dataset/imagenet' \ +-pretrained='./mae_pretrain_vit_base.pdparams' \ +-amp \ From 3aa22f5d630704627212c95c8e6a41155fe208f3 Mon Sep 17 00:00:00 2001 From: xperzy Date: Wed, 23 Mar 2022 22:19:43 +0800 Subject: [PATCH 12/12] refactore mae --- ...base_patch16_224_finetune_single_node.yaml | 47 - .../MAE/run_finetune_vit_b.sh | 9 - .../MAE/run_finetune_vit_b_single_node.sh | 9 - .../MAE/run_pretrain_vit_b_debug.sh | 8 - .../MAE/README.md | 0 .../MAE/augment.py | 47 +- .../MAE/config.py | 108 +- .../vit_base_patch16_224_finetune.yaml | 15 +- .../vit_base_patch16_224_linearprobe.yaml | 15 +- ...e_patch16_224_linearprobe_single_node.yaml | 15 +- .../vit_base_patch16_224_pretrain.yaml | 27 +- .../vit_base_patch16_224_pretrain_dec1.yaml | 29 +- .../vit_huge_patch14_224_finetune.yaml | 15 +- .../vit_huge_patch14_224_linearprobe.yaml | 15 +- .../vit_huge_patch14_224_pretrain.yaml | 27 +- .../vit_large_patch16_224_finetune.yaml | 15 +- .../vit_large_patch16_224_linearprobe.yaml | 15 +- .../vit_large_patch16_224_pretrain.yaml | 27 +- .../MAE/datasets.py | 76 +- .../MAE/droppath.py | 5 +- .../MAE/load_pytorch_weights.py | 5 +- .../MAE/load_pytorch_weights_finetune.py | 10 +- .../MAE/losses.py | 17 +- .../MAE/lr_decay.py | 0 .../MAE/mae.png | Bin .../MAE/main_multi_gpu_finetune.py | 276 +-- .../MAE/main_multi_gpu_linearprobe.py | 305 ++- .../MAE/main_multi_gpu_pretrain.py | 267 +-- .../MAE/mixup.py | 10 +- self_supervised_learning/MAE/nohup.out | 1677 +++++++++++++++++ .../MAE/pos_embed.py | 0 .../MAE/random_erasing.py | 0 .../MAE/run_finetune_vit_b.sh | 7 + .../MAE/run_finetune_vit_b_1node.sh | 8 + .../MAE/run_linearprobe_vit_b.sh | 0 .../MAE/run_linearprobe_vit_b_1node.sh | 3 +- .../MAE/run_pretrain_vit_b.sh | 0 .../MAE/transformer.py | 55 +- .../MAE/utils.py | 148 +- 39 files changed, 2379 insertions(+), 933 deletions(-) delete mode 100644 image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml delete mode 100644 image_classification/MAE/run_finetune_vit_b.sh delete mode 100644 image_classification/MAE/run_finetune_vit_b_single_node.sh delete mode 100644 image_classification/MAE/run_pretrain_vit_b_debug.sh rename {image_classification => self_supervised_learning}/MAE/README.md (100%) rename {image_classification => self_supervised_learning}/MAE/augment.py (95%) rename {image_classification => self_supervised_learning}/MAE/config.py (67%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_base_patch16_224_finetune.yaml (82%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_base_patch16_224_linearprobe.yaml (65%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml (65%) rename {image_classification => 
self_supervised_learning}/MAE/configs/vit_base_patch16_224_pretrain.yaml (53%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml (50%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_huge_patch14_224_finetune.yaml (83%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_huge_patch14_224_linearprobe.yaml (64%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_huge_patch14_224_pretrain.yaml (52%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_large_patch16_224_finetune.yaml (83%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_large_patch16_224_linearprobe.yaml (65%) rename {image_classification => self_supervised_learning}/MAE/configs/vit_large_patch16_224_pretrain.yaml (53%) rename {image_classification => self_supervised_learning}/MAE/datasets.py (79%) rename {image_classification => self_supervised_learning}/MAE/droppath.py (93%) rename {image_classification => self_supervised_learning}/MAE/load_pytorch_weights.py (98%) rename {image_classification => self_supervised_learning}/MAE/load_pytorch_weights_finetune.py (96%) rename {image_classification => self_supervised_learning}/MAE/losses.py (93%) rename {image_classification => self_supervised_learning}/MAE/lr_decay.py (100%) rename {image_classification => self_supervised_learning}/MAE/mae.png (100%) rename {image_classification => self_supervised_learning}/MAE/main_multi_gpu_finetune.py (76%) rename {image_classification => self_supervised_learning}/MAE/main_multi_gpu_linearprobe.py (75%) rename {image_classification => self_supervised_learning}/MAE/main_multi_gpu_pretrain.py (63%) rename {image_classification => self_supervised_learning}/MAE/mixup.py (97%) create mode 100644 self_supervised_learning/MAE/nohup.out rename {image_classification => self_supervised_learning}/MAE/pos_embed.py (100%) rename {image_classification => self_supervised_learning}/MAE/random_erasing.py (100%) create mode 100644 self_supervised_learning/MAE/run_finetune_vit_b.sh create mode 100644 self_supervised_learning/MAE/run_finetune_vit_b_1node.sh rename image_classification/MAE/run_linearprobe_vit_b_single_node.sh => self_supervised_learning/MAE/run_linearprobe_vit_b.sh (100%) rename image_classification/MAE/run_linearprobe_vit_b.sh => self_supervised_learning/MAE/run_linearprobe_vit_b_1node.sh (64%) rename {image_classification => self_supervised_learning}/MAE/run_pretrain_vit_b.sh (100%) rename {image_classification => self_supervised_learning}/MAE/transformer.py (94%) rename {image_classification => self_supervised_learning}/MAE/utils.py (61%) diff --git a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml b/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml deleted file mode 100644 index af89b84b..00000000 --- a/image_classification/MAE/configs/vit_base_patch16_224_finetune_single_node.yaml +++ /dev/null @@ -1,47 +0,0 @@ -DATA: - IMAGE_SIZE: 224 - CROP_PCT: 0.875 -MODEL: - TYPE: FINETUNE - NAME: vit_base_patch16_224 - DROPPATH: 0.1 - GLOBAL_POOL: True - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 -TRAIN: - ACCUM_ITER: 4 # set batch size to 32 - NUM_EPOCHS: 100 # same as MAE official readme - WARMUP_EPOCHS: 5 - WEIGHT_DECAY: 0.05 - BASE_LR: 5e-4 - LINEAR_SCALED_LR: 256 - END_LR: 1e-6 - OPTIMIZER: - NAME: 'AdamWDL' - BETAS: (0.9, 0.999) - 
LAYER_DECAY: 0.65 - SMOOTHING: 0.1 - RAND_AUGMENT: True - RAND_AUGMENT_LAYERS: 2 - RAND_AUGMENT_MAGNITUDE: 9 - MIXUP_ALPHA: 0.8 - MIXUP_PROB: 1.0 - MIXUP_SWITCH_PROB: 0.5 - MIXUP_MODE: 'batch' - CUTMIX_ALPHA: 1.0 - CUTMIX_MINMAX: None - RANDOM_ERASE_PROB: 0.25 - RANDOM_ERASE_MODE: 'pixel' - RANDOM_ERASE_COUNT: 1 - RANDOM_ERASE_SPLIT: False - -VALIDATE_FREQ: 1 -SAVE_FREQ: 1 -REPORT_FREQ: 20 - diff --git a/image_classification/MAE/run_finetune_vit_b.sh b/image_classification/MAE/run_finetune_vit_b.sh deleted file mode 100644 index 12585cd1..00000000 --- a/image_classification/MAE/run_finetune_vit_b.sh +++ /dev/null @@ -1,9 +0,0 @@ -#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ -#python main_multi_gpu_finetune.py \ -GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_finetune.py \ --cfg='./configs/vit_base_patch16_224_finetune.yaml' \ --dataset='imagenet2012' \ --batch_size=32 \ --data_path='/dataset/imagenet' \ --pretrained='./mae_vit_base_patch16' \ --amp \ diff --git a/image_classification/MAE/run_finetune_vit_b_single_node.sh b/image_classification/MAE/run_finetune_vit_b_single_node.sh deleted file mode 100644 index 79ba7514..00000000 --- a/image_classification/MAE/run_finetune_vit_b_single_node.sh +++ /dev/null @@ -1,9 +0,0 @@ -#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ -#python main_multi_gpu_finetune.py \ -GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_finetune.py \ --cfg='./configs/vit_base_patch16_224_finetune_single_node.yaml' \ --dataset='imagenet2012' \ --batch_size=32 \ --data_path='/dataset/imagenet' \ --pretrained='./mae_vit_base_patch16' \ --amp \ diff --git a/image_classification/MAE/run_pretrain_vit_b_debug.sh b/image_classification/MAE/run_pretrain_vit_b_debug.sh deleted file mode 100644 index e78f350a..00000000 --- a/image_classification/MAE/run_pretrain_vit_b_debug.sh +++ /dev/null @@ -1,8 +0,0 @@ -#CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ -#python main_multi_gpu_pretrain.py \ -GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_pretrain.py \ --cfg='./configs/vit_base_patch16_224_pretrain_dec1.yaml' \ --dataset='imagenet2012' \ --batch_size=64 \ --data_path='/dataset/imagenet' \ --amp \ diff --git a/image_classification/MAE/README.md b/self_supervised_learning/MAE/README.md similarity index 100% rename from image_classification/MAE/README.md rename to self_supervised_learning/MAE/README.md diff --git a/image_classification/MAE/augment.py b/self_supervised_learning/MAE/augment.py similarity index 95% rename from image_classification/MAE/augment.py rename to self_supervised_learning/MAE/augment.py index b6ffbe12..51b41090 100644 --- a/image_classification/MAE/augment.py +++ b/self_supervised_learning/MAE/augment.py @@ -11,26 +11,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
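+# Each AutoAugment policy below is a list of sub-policies; every sub-policy is a list of
+# (op_name, probability, magnitude) tuples, e.g. [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)].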
-"""Augmentation""" -""" Rand Augmentation """ -# reference: RandAugment: Practical automated data augmentation with a reduced search space -# https://arxiv.org/abs/1909.13719 - -""" Auto Augmentation """ -# reference: AutoAugment: Learning Augmentation Policies from Data -# https://arxiv.org/abs/1805.09501 +"""Augmentation +RandAug: +- reference: RandAugment: Practical automated data augmentation with a reduced search space +- https://arxiv.org/abs/1909.13719 +AutoAug: +- reference: AutoAugment: Learning Augmentation Policies from Data +- https://arxiv.org/abs/1805.09501 +""" import random -import math import numpy as np -from PIL import Image, ImageEnhance, ImageOps, ImageChops -import PIL +from PIL import Image, ImageEnhance, ImageOps LEVEL_DENOM = 10 +#fill color is set to 128 instead fo image mean -#NOTE: fill color is set to 128 instead fo image mean -def auto_augment_policy_v0(hparams): +def auto_augment_policy_v0(): """policy v0: hack from timm""" # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. policy = [ @@ -56,7 +54,7 @@ def auto_augment_policy_v0(hparams): [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], - [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], [('Color', 0.8, 6), ('Rotate', 0.4, 5)], ] @@ -64,7 +62,7 @@ def auto_augment_policy_v0(hparams): return policy -def auto_augment_policy_v0r(hparams): +def auto_augment_policy_v0r(): """policy v0r: hack from timm""" # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used # in Google research implementation (number of bits discarded increases with magnitude) @@ -99,7 +97,7 @@ def auto_augment_policy_v0r(hparams): return policy -def auto_augment_policy_originalr(hparams): +def auto_augment_policy_originalr(): """policy originalr: hack from timm""" # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation policy = [ @@ -185,8 +183,8 @@ def __call__(self, image, policy_idx=None): policy_idx = random.randint(0, len(self.policy) - 1) sub_policy = self.policy[policy_idx] - for op in sub_policy: - image = op(image) + for operation in sub_policy: + image = operation(image) return image @@ -315,7 +313,7 @@ def __init__(self, op_name, prob, magnitude, magnitude_std=0.5): self.magnitude = magnitude self.magnitude_std = magnitude_std - self.op = image_ops[op_name] + self.ops = image_ops[op_name] self.level_fn = level_fn[op_name] def __call__(self, image): @@ -332,16 +330,16 @@ def __call__(self, image): upper_bound = LEVEL_DENOM magnitude = max(0, min(magnitude, upper_bound)) level_args = self.level_fn(magnitude) if self.level_fn is not None else tuple() - image = self.op(image, *level_args) + image = self.ops(image, *level_args) return image ################################################################# # Convert level to Image op arguments ################################################################# -def randomly_negate(v): +def randomly_negate(value): """negate the value with 0.5 prob""" - return -v if random.random() > 0.5 else v + return -value if random.random() > 0.5 else value def shear_level_to_arg(level): @@ -482,8 +480,8 @@ def solarize_add(image, add, thresh=128): if image.mode == "RGB" and len(lut) == 256: lut = lut + lut + lut return image.point(lut) - else: - return image + + return image def 
posterize(image, bits_to_keep): @@ -506,4 +504,3 @@ def brightness(image, factor): def sharpness(image, factor): return ImageEnhance.Sharpness(image).enhance(factor) - diff --git a/image_classification/MAE/config.py b/self_supervised_learning/MAE/config.py similarity index 67% rename from image_classification/MAE/config.py rename to self_supervised_learning/MAE/config.py index 5ef1372d..3860b1e9 100644 --- a/image_classification/MAE/config.py +++ b/self_supervised_learning/MAE/config.py @@ -13,11 +13,8 @@ # limitations under the License. """Configuration - -Configuration for data, model archtecture, and training, etc. -Config can be set by .yaml file or by argparser(limited usage) - - +Configurations for (1) data processing, (2) model archtecture, and (3) training settings, etc. +Config can be set by .yaml file or by argparser """ import os from yacs.config import CfgNode as CN @@ -29,14 +26,15 @@ # data settings _C.DATA = CN() _C.DATA.BATCH_SIZE = 256 # train batch_size on single GPU -_C.DATA.BATCH_SIZE_EVAL = 256 # (disabled in update_config) val batch_size on single GPU +_C.DATA.BATCH_SIZE_EVAL = None # (disabled in update_config) val batch_size on single GPU _C.DATA.DATA_PATH = '/dataset/imagenet/' # path to dataset _C.DATA.DATASET = 'imagenet2012' # dataset name, currently only support imagenet2012 -_C.DATA.IMAGE_SIZE = 224 # input image size: 224 for pretrain -_C.DATA.CROP_PCT = 0.875 # input image scale ratio, scale is applied before centercrop in eval mode +_C.DATA.IMAGE_SIZE = 224 # input image size e.g., 224 +_C.DATA.IMAGE_CHANNELS = 3 # input image channels: e.g., 3 +_C.DATA.CROP_PCT = 0.875 # input image scale ratio, scale is applied before centercrop in eval mode _C.DATA.NUM_WORKERS = 2 # number of data loading threads -_C.DATA.IMAGENET_MEAN = [0.485, 0.456, 0.406] # [0.5, 0.5, 0.5] # imagenet mean values -_C.DATA.IMAGENET_STD = [0.229, 0.224, 0.225] # [0.5, 0.5, 0.5] # imagenet std values +_C.DATA.IMAGENET_MEAN = [0.485, 0.456, 0.406] # imagenet mean values +_C.DATA.IMAGENET_STD = [0.229, 0.224, 0.225] # imagenet std values # model settings _C.MODEL = CN() @@ -51,20 +49,19 @@ _C.MODEL.GLOBAL_POOL = False # Pretrain: N/A, Finetune: True, Linearprobe: False # transformer settings -_C.MODEL.TRANS = CN() -_C.MODEL.TRANS.PATCH_SIZE = 16 -_C.MODEL.TRANS.MLP_RATIO = 4.0 -_C.MODEL.TRANS.QKV_BIAS = True -_C.MODEL.TRANS.MASK_RATIO = 0.75 -_C.MODEL.TRANS.NORM_PIX_LOSS = True # effective only for Pretrain -_C.MODEL.TRANS.ENCODER = CN() -_C.MODEL.TRANS.ENCODER.DEPTH = 12 -_C.MODEL.TRANS.ENCODER.EMBED_DIM = 768 -_C.MODEL.TRANS.ENCODER.NUM_HEADS = 12 -_C.MODEL.TRANS.DECODER = CN() -_C.MODEL.TRANS.DECODER.DEPTH = 8 -_C.MODEL.TRANS.DECODER.EMBED_DIM = 512 -_C.MODEL.TRANS.DECODER.NUM_HEADS = 16 +_C.MODEL.PATCH_SIZE = 16 +_C.MODEL.MLP_RATIO = 4.0 +_C.MODEL.QKV_BIAS = True +_C.MODEL.MASK_RATIO = 0.75 +_C.MODEL.NORM_PIX_LOSS = True # effective only for Pretrain +_C.MODEL.ENCODER = CN() +_C.MODEL.ENCODER.DEPTH = 12 +_C.MODEL.ENCODER.EMBED_DIM = 768 +_C.MODEL.ENCODER.NUM_HEADS = 12 +_C.MODEL.DECODER = CN() +_C.MODEL.DECODER.DEPTH = 8 +_C.MODEL.DECODER.EMBED_DIM = 512 +_C.MODEL.DECODER.NUM_HEADS = 16 # training settings (for Vit-L/16 pretrain) @@ -74,13 +71,19 @@ _C.TRAIN.WARMUP_EPOCHS = 40 _C.TRAIN.WEIGHT_DECAY = 0.05 _C.TRAIN.BASE_LR = 1.5e-4 -_C.TRAIN.WARMUP_START_LR = 1e-6 # 0.0 # not used in MAE +_C.TRAIN.WARMUP_START_LR = 0.0 _C.TRAIN.END_LR = 0.0 # 1e-6 _C.TRAIN.GRAD_CLIP = None _C.TRAIN.ACCUM_ITER = 1 -_C.TRAIN.LINEAR_SCALED_LR = 256 +_C.TRAIN.LINEAR_SCALED_LR = 512 _C.TRAIN.LAYER_DECAY = 
None # used for finetuning only +# optimizer +_C.TRAIN.OPTIMIZER = CN() +_C.TRAIN.OPTIMIZER.NAME = 'AdamW' +_C.TRAIN.OPTIMIZER.EPS = 1e-8 +_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.95) + # train augmentation (only for finetune) _C.TRAIN.SMOOTHING = 0.1 _C.TRAIN.COLOR_JITTER = 0.4 @@ -101,34 +104,25 @@ _C.TRAIN.RANDOM_ERASE_COUNT = 1 _C.TRAIN.RANDOM_ERASE_SPLIT = False - -_C.TRAIN.LR_SCHEDULER = CN() -_C.TRAIN.LR_SCHEDULER.NAME = 'warmupcosine' -_C.TRAIN.LR_SCHEDULER.MILESTONES = "30, 60, 90" # only used in StepLRScheduler -_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 # only used in StepLRScheduler -_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 # only used in StepLRScheduler - -_C.TRAIN.OPTIMIZER = CN() -_C.TRAIN.OPTIMIZER.NAME = 'AdamW' -_C.TRAIN.OPTIMIZER.EPS = 1e-8 -_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.95) # for adamW same as pytorch MAE -_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9 - - # misc -_C.SAVE = "./output" -_C.TAG = "default" +_C.SAVE = "./output" # output folder, saves logs and weights _C.SAVE_FREQ = 10 # freq to save chpt _C.REPORT_FREQ = 20 # freq to logging info _C.VALIDATE_FREQ = 1 # freq to do validation -_C.SEED = 0 +_C.SEED = 0 # random seed _C.EVAL = False # run evaluation only -_C.AMP = False # mix precision training -_C.LOCAL_RANK = 0 -_C.NGPUS = -1 # not used in MAE fleet launch +_C.AMP = False # auto mix precision training def _update_config_from_file(config, cfg_file): + """Load cfg file (.yaml) and update config object + + Args: + config: config object + cfg_file: config file (.yaml) + Return: + None + """ config.defrost() with open(cfg_file, 'r') as infile: yaml_cfg = yaml.load(infile, Loader=yaml.FullLoader) @@ -137,13 +131,13 @@ def _update_config_from_file(config, cfg_file): _update_config_from_file( config, os.path.join(os.path.dirname(cfg_file), cfg) ) - print('merging config from {}'.format(cfg_file)) config.merge_from_file(cfg_file) config.freeze() def update_config(config, args): """Update config by ArgumentParser + Configs that are often used can be updated from arguments Args: args: ArgumentParser contains options Return: @@ -156,35 +150,31 @@ def update_config(config, args): config.DATA.DATASET = args.dataset if args.batch_size: config.DATA.BATCH_SIZE = args.batch_size + config.DATA.BATCH_SIZE_EVAL = args.batch_size + if args.batch_size_eval: + config.DATA.BATCH_SIZE_EVAL = args.batch_size_eval if args.image_size: config.DATA.IMAGE_SIZE = args.image_size - if args.data_path: - config.DATA.DATA_PATH = args.data_path - if args.ngpus: - config.NGPUS = args.ngpus if args.accum_iter: config.TRAIN.ACCUM_ITER = args.accum_iter + if args.data_path: + config.DATA.DATA_PATH = args.data_path if args.eval: config.EVAL = True - config.DATA.BATCH_SIZE_EVAL = args.batch_size if args.pretrained: config.MODEL.PRETRAINED = args.pretrained if args.resume: config.MODEL.RESUME = args.resume if args.last_epoch: config.TRAIN.LAST_EPOCH = args.last_epoch - if args.amp: # only during training - if config.EVAL is True: - config.AMP = False - else: - config.AMP = True - + if args.amp: # only for training + config.AMP = not config.EVAL # config.freeze() return config def get_config(cfg_file=None): - """Return a clone of config or load from yaml file""" + """Return a clone of config and optionally overwrite it from yaml file""" config = _C.clone() if cfg_file: _update_config_from_file(config, cfg_file) diff --git a/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml b/self_supervised_learning/MAE/configs/vit_base_patch16_224_finetune.yaml similarity index 82% rename from 
image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml rename to self_supervised_learning/MAE/configs/vit_base_patch16_224_finetune.yaml index 54eb672b..6bd2e558 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_finetune.yaml +++ b/self_supervised_learning/MAE/configs/vit_base_patch16_224_finetune.yaml @@ -6,14 +6,13 @@ MODEL: NAME: vit_base_patch16_224 DROPPATH: 0.1 GLOBAL_POOL: True - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 TRAIN: NUM_EPOCHS: 100 # same as MAE official readme WARMUP_EPOCHS: 5 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml b/self_supervised_learning/MAE/configs/vit_base_patch16_224_linearprobe.yaml similarity index 65% rename from image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml rename to self_supervised_learning/MAE/configs/vit_base_patch16_224_linearprobe.yaml index a3b66148..b4046fe8 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe.yaml +++ b/self_supervised_learning/MAE/configs/vit_base_patch16_224_linearprobe.yaml @@ -5,14 +5,13 @@ MODEL: TYPE: LINEARPROBE NAME: vit_base_patch16_224 GLOBAL_POOL: False # enable cls_token - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 TRAIN: NUM_EPOCHS: 90 WARMUP_EPOCHS: 10 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml b/self_supervised_learning/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml similarity index 65% rename from image_classification/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml rename to self_supervised_learning/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml index 35369139..2fb83fbc 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml +++ b/self_supervised_learning/MAE/configs/vit_base_patch16_224_linearprobe_single_node.yaml @@ -5,14 +5,13 @@ MODEL: TYPE: LINEARPROBE NAME: vit_base_patch16_224 GLOBAL_POOL: False # enable cls_token - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 TRAIN: ACCUM_ITER: 4 NUM_EPOCHS: 90 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml b/self_supervised_learning/MAE/configs/vit_base_patch16_224_pretrain.yaml similarity index 53% rename from image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml rename to self_supervised_learning/MAE/configs/vit_base_patch16_224_pretrain.yaml index 205be30a..df89c2e6 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_pretrain.yaml +++ b/self_supervised_learning/MAE/configs/vit_base_patch16_224_pretrain.yaml @@ -5,20 +5,19 @@ MODEL: TYPE: PRETRAIN NAME: vit_base_patch16_224 DROPPATH: 0.0 - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - MASK_RATIO: 0.75 - ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 - DECODER: - EMBED_DIM: 512 - DEPTH: 8 - NUM_HEADS: 16 - NORM_PIX_LOSS: True + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + MASK_RATIO: 0.75 + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 + DECODER: + 
EMBED_DIM: 512 + DEPTH: 8 + NUM_HEADS: 16 + NORM_PIX_LOSS: True TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 diff --git a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml b/self_supervised_learning/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml similarity index 50% rename from image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml rename to self_supervised_learning/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml index ad2b7a63..20646c66 100644 --- a/image_classification/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml +++ b/self_supervised_learning/MAE/configs/vit_base_patch16_224_pretrain_dec1.yaml @@ -5,20 +5,19 @@ MODEL: TYPE: PRETRAIN NAME: vit_base_patch16_224 DROPPATH: 0.0 - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - MASK_RATIO: 0.75 - ENCODER: - EMBED_DIM: 768 - DEPTH: 12 - NUM_HEADS: 12 - DECODER: - EMBED_DIM: 512 - DEPTH: 1 - NUM_HEADS: 16 - NORM_PIX_LOSS: True + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + MASK_RATIO: 0.75 + ENCODER: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 + DECODER: + EMBED_DIM: 512 + DEPTH: 1 + NUM_HEADS: 16 + NORM_PIX_LOSS: True TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 @@ -29,7 +28,7 @@ TRAIN: GRAD_CLIP: None ACCUM_ITER: 1 OPTIMIZER: - NAME: 'AdamW' + NAME: 'AdamWDL' BETAS: (0.9, 0.95) SAVE_FREQ: 1 diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml b/self_supervised_learning/MAE/configs/vit_huge_patch14_224_finetune.yaml similarity index 83% rename from image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml rename to self_supervised_learning/MAE/configs/vit_huge_patch14_224_finetune.yaml index 4b47a06a..f3bd0df1 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_finetune.yaml +++ b/self_supervised_learning/MAE/configs/vit_huge_patch14_224_finetune.yaml @@ -6,14 +6,13 @@ MODEL: NAME: vit_huge_patch14_224 DROPPATH: 0.3 GLOBAL_POOL: True - TRANS: - PATCH_SIZE: 14 - MLP_RATIO: 4.0 - QKV_BIAS: true - ENCODER: - EMBED_DIM: 1280 - DEPTH: 32 - NUM_HEADS: 16 + PATCH_SIZE: 14 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 1280 + DEPTH: 32 + NUM_HEADS: 16 TRAIN: NUM_EPOCHS: 50 WARMUP_EPOCHS: 5 diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml b/self_supervised_learning/MAE/configs/vit_huge_patch14_224_linearprobe.yaml similarity index 64% rename from image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml rename to self_supervised_learning/MAE/configs/vit_huge_patch14_224_linearprobe.yaml index 83e70b23..465d53dc 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_linearprobe.yaml +++ b/self_supervised_learning/MAE/configs/vit_huge_patch14_224_linearprobe.yaml @@ -5,14 +5,13 @@ MODEL: TYPE: LINEARPROBE NAME: vit_huge_patch14_224 GLOBAL_POOL: False - TRANS: - PATCH_SIZE: 14 - MLP_RATIO: 4.0 - QKV_BIAS: true - ENCODER: - EMBED_DIM: 1280 - DEPTH: 32 - NUM_HEADS: 16 + PATCH_SIZE: 14 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 1280 + DEPTH: 32 + NUM_HEADS: 16 TRAIN: NUM_EPOCHS: 90 WARMUP_EPOCHS: 10 diff --git a/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml b/self_supervised_learning/MAE/configs/vit_huge_patch14_224_pretrain.yaml similarity index 52% rename from image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml rename to self_supervised_learning/MAE/configs/vit_huge_patch14_224_pretrain.yaml index 485e9c76..a9bbc101 100644 --- a/image_classification/MAE/configs/vit_huge_patch14_224_pretrain.yaml +++ 
b/self_supervised_learning/MAE/configs/vit_huge_patch14_224_pretrain.yaml @@ -5,20 +5,19 @@ MODEL: TYPE: PRETRAIN NAME: vit_huge_patch14_224 DROPPATH: 0.0 - TRANS: - PATCH_SIZE: 14 - MLP_RATIO: 4.0 - QKV_BIAS: true - MASK_RATIO: 0.75 - ENCODER: - EMBED_DIM: 1280 - DEPTH: 32 - NUM_HEADS: 16 - DECODER: - EMBED_DIM: 512 - DEPTH: 8 - NUM_HEADS: 16 - NORM_PIX_LOSS: True + PATCH_SIZE: 14 + MLP_RATIO: 4.0 + QKV_BIAS: true + MASK_RATIO: 0.75 + ENCODER: + EMBED_DIM: 1280 + DEPTH: 32 + NUM_HEADS: 16 + DECODER: + EMBED_DIM: 512 + DEPTH: 8 + NUM_HEADS: 16 + NORM_PIX_LOSS: True TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 diff --git a/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml b/self_supervised_learning/MAE/configs/vit_large_patch16_224_finetune.yaml similarity index 83% rename from image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml rename to self_supervised_learning/MAE/configs/vit_large_patch16_224_finetune.yaml index 210f305c..e09b9e4a 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_finetune.yaml +++ b/self_supervised_learning/MAE/configs/vit_large_patch16_224_finetune.yaml @@ -6,14 +6,13 @@ MODEL: NAME: vit_large_patch16_224 DROPPATH: 0.2 # same as MAE official readme GLOBAL_POOL: True - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - ENCODER: - EMBED_DIM: 1024 - DEPTH: 24 - NUM_HEADS: 16 + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 1024 + DEPTH: 24 + NUM_HEADS: 16 TRAIN: NUM_EPOCHS: 50 WARMUP_EPOCHS: 5 diff --git a/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml b/self_supervised_learning/MAE/configs/vit_large_patch16_224_linearprobe.yaml similarity index 65% rename from image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml rename to self_supervised_learning/MAE/configs/vit_large_patch16_224_linearprobe.yaml index b01f3be6..249afd16 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_linearprobe.yaml +++ b/self_supervised_learning/MAE/configs/vit_large_patch16_224_linearprobe.yaml @@ -5,14 +5,13 @@ MODEL: TYPE: LINEARPROBE NAME: vit_large_patch16_224 GLOBAL_POOL: False # enable cls_token - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - ENCODER: - EMBED_DIM: 1024 - DEPTH: 24 - NUM_HEADS: 16 + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + ENCODER: + EMBED_DIM: 1024 + DEPTH: 24 + NUM_HEADS: 16 TRAIN: NUM_EPOCHS: 90 WARMUP_EPOCHS: 10 diff --git a/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml b/self_supervised_learning/MAE/configs/vit_large_patch16_224_pretrain.yaml similarity index 53% rename from image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml rename to self_supervised_learning/MAE/configs/vit_large_patch16_224_pretrain.yaml index 42ec4508..0fbd3d8e 100644 --- a/image_classification/MAE/configs/vit_large_patch16_224_pretrain.yaml +++ b/self_supervised_learning/MAE/configs/vit_large_patch16_224_pretrain.yaml @@ -5,20 +5,19 @@ MODEL: TYPE: PRETRAIN NAME: vit_large_patch16_224 DROPPATH: 0.0 - TRANS: - PATCH_SIZE: 16 - MLP_RATIO: 4.0 - QKV_BIAS: true - MASK_RATIO: 0.75 - ENCODER: - EMBED_DIM: 1024 - DEPTH: 24 - NUM_HEADS: 16 - DECODER: - EMBED_DIM: 512 - DEPTH: 8 - NUM_HEADS: 16 - NORM_PIX_LOSS: True + PATCH_SIZE: 16 + MLP_RATIO: 4.0 + QKV_BIAS: true + MASK_RATIO: 0.75 + ENCODER: + EMBED_DIM: 1024 + DEPTH: 24 + NUM_HEADS: 16 + DECODER: + EMBED_DIM: 512 + DEPTH: 8 + NUM_HEADS: 16 + NORM_PIX_LOSS: True TRAIN: NUM_EPOCHS: 800 WARMUP_EPOCHS: 40 diff --git 
a/image_classification/MAE/datasets.py b/self_supervised_learning/MAE/datasets.py similarity index 79% rename from image_classification/MAE/datasets.py rename to self_supervised_learning/MAE/datasets.py index 97414b06..91f8b30b 100644 --- a/image_classification/MAE/datasets.py +++ b/self_supervised_learning/MAE/datasets.py @@ -12,19 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -Dataset related classes and methods for ViT training and validation -Cifar10, Cifar100 and ImageNet2012 are supported -""" +"""Dataset related classes and methods for ViT training and validation""" import os import math -from PIL import Image from paddle.io import Dataset from paddle.io import DataLoader from paddle.io import DistributedBatchSampler from paddle.vision import transforms -from paddle.vision import datasets from paddle.vision import image_load from augment import auto_augment_policy_original from augment import AutoAugment @@ -39,7 +34,7 @@ class ImageNet2012Dataset(Dataset): This class gets train/val imagenet datasets, which loads transfomed data and labels. Note: train_list.txt and val_list.txt is required. - Please refer https://github.com/BR-IDL/PaddleViT/tree/mae_refactor/image_classification#data-preparation + Please refer https://github.com/BR-IDL/PaddleViT/image_classification#data-preparation Attributes: file_folder: path where imagenet images are stored @@ -48,19 +43,17 @@ class ImageNet2012Dataset(Dataset): label_list: list of labels of whole dataset """ - def __init__(self, file_folder, mode="train", transform=None): + def __init__(self, file_folder, is_train=True, transform_ops=None): """Init ImageNet2012 Dataset with dataset file path, mode(train/val), and transform""" - super(ImageNet2012Dataset, self).__init__() - assert mode in ["train", "val"] + super().__init__() self.file_folder = file_folder - self.transform = transform + self.transforms = transform_ops self.img_path_list = [] self.label_list = [] - if mode == "train": - self.list_file = os.path.join(self.file_folder, "train_list.txt") - else: - self.list_file = os.path.join(self.file_folder, "val_list.txt") + list_name = 'train_list.txt' if is_train else 'val_list.txt' + self.list_file = os.path.join(self.file_folder, list_name) + assert os.path.isfile(self.list_file), f'{self.list_file} not exist!' with open(self.list_file, 'r') as infile: for line in infile: @@ -68,14 +61,14 @@ def __init__(self, file_folder, mode="train", transform=None): img_label = int(line.strip().split()[1]) self.img_path_list.append(os.path.join(self.file_folder, img_path)) self.label_list.append(img_label) - print(f'----- Imagenet2012 image {mode} list len = {len(self.label_list)}') + print(f'----- Imagenet2012 {list_name} len = {len(self.label_list)}') def __len__(self): return len(self.label_list) def __getitem__(self, index): data = image_load(self.img_path_list[index]).convert('RGB') - data = self.transform(data) + data = self.transforms(data) label = self.label_list[index] return data, label @@ -178,14 +171,14 @@ def get_val_transforms(config): """ Get training transforms For validation, image is first Resize then CenterCrop to image_size. - Then normalization is applied with [0.5, 0.5, 0.5] mean and std. + Then normalization is applied with mean and std. The input pixel values must be rescaled to [0, 1.] 
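For reference, the validation pipeline assembled by get_val_transforms() boils down to resizing the shorter side to IMAGE_SIZE / CROP_PCT and then center-cropping; a minimal sketch, assuming IMAGE_SIZE=224, CROP_PCT=0.875, bicubic interpolation, and ImageNet mean/std (these values are illustrative, the real ones come from the config):

import math
from paddle.vision import transforms

image_size, crop_pct = 224, 0.875                          # assumed example values
scale_size = int(math.floor(image_size / crop_pct))        # 256
transforms_val = transforms.Compose([
    transforms.Resize(scale_size, interpolation='bicubic'),  # resize shorter side; interpolation is an assumption
    transforms.CenterCrop((image_size, image_size)),
    transforms.ToTensor(),                                  # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],        # assumed ImageNet statistics
                         std=[0.229, 0.224, 0.225]),
])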
Outputs is converted to tensor Args: config: configs contains IMAGE_SIZE, see config.py for details Returns: - transforms_train: training transforms + transforms_val: transform ops """ scale_size = int(math.floor(config.DATA.IMAGE_SIZE / config.DATA.CROP_PCT)) @@ -197,55 +190,56 @@ def get_val_transforms(config): return transforms_val -def get_dataset(config, mode='train'): +def get_dataset(config, is_train=True): """ Get dataset from config and mode (train/val) - Returns the related dataset object according to configs and mode(train/val) Args: config: configs contains dataset related settings. see config.py for details + is_train: bool, set True to use training set, otherwise val set. Default: True Returns: dataset: dataset object """ - assert mode in ['train', 'val', 'test'] - # both val and test use get_val_transforms if config.DATA.DATASET == "imagenet2012": - transform = get_train_transforms(config) if mode == 'train' else get_val_transforms(config) + if is_train: + transform_ops = get_train_transforms(config) + else: + transform_ops = get_val_transforms(config) dataset = ImageNet2012Dataset(config.DATA.DATA_PATH, - mode=mode, - transform=transform) + is_train=is_train, + transform_ops=transform_ops) else: raise NotImplementedError( - "[{config.DATA.DATASET}] Only cifar10, cifar100, imagenet2012 are supported now") + f"Wrong dataset name: [{config.DATA.DATASET}]. Only 'imagenet2012' is supported now") return dataset -def get_dataloader(config, dataset, mode='train', multi_process=False): - """Get dataloader with config, dataset, mode as input, allows multiGPU settings. - - Multi-GPU loader is implements as distributedBatchSampler. +def get_dataloader(config, dataset, is_train=True, use_dist_sampler=False): + """Get dataloader from dataset, allows multiGPU settings. + Multi-GPU loader is implemented as DistributedBatchSampler. Args: config: see config.py for details dataset: paddle.io.dataset object - mode: train/val - multi_process: if True, use DistributedBatchSampler to support multi-processing + is_train: bool, when False, shuffle is off and BATCH_SIZE_EVAL is used, default: True + use_dist_sampler: if True, DistributedBatchSampler is used, default: False Returns: dataloader: paddle.io.DataLoader object.
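A short usage sketch of the refactored helpers above, mirroring how the main_multi_gpu_*.py scripts later in this patch call them (the yaml path is only an example):

import paddle
from config import get_config
from datasets import get_dataset, get_dataloader

config = get_config('./configs/vit_base_patch16_224_pretrain.yaml')  # example config
dataset_train = get_dataset(config, is_train=True)
dataloader_train = get_dataloader(
    config,
    dataset_train,
    is_train=True,                                              # shuffle, BATCH_SIZE, drop_last
    use_dist_sampler=paddle.distributed.get_world_size() > 1)   # DistributedBatchSampler on multi GPU
images, labels = next(iter(dataloader_train))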
""" + batch_size = config.DATA.BATCH_SIZE if is_train else config.DATA.BATCH_SIZE_EVAL - batch_size = config.DATA.BATCH_SIZE if mode == 'train' else config.DATA.BATCH_SIZE_EVAL - - if multi_process is True: - sampler = DistributedBatchSampler(dataset, + if use_dist_sampler is True: + sampler = DistributedBatchSampler(dataset=dataset, batch_size=batch_size, - shuffle=(mode == 'train')) - dataloader = DataLoader(dataset, + shuffle=is_train, + drop_last=is_train) + dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, num_workers=config.DATA.NUM_WORKERS) else: - dataloader = DataLoader(dataset, + dataloader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=config.DATA.NUM_WORKERS, - shuffle=(mode == 'train')) + shuffle=is_train, + drop_last=is_train) return dataloader diff --git a/image_classification/MAE/droppath.py b/self_supervised_learning/MAE/droppath.py similarity index 93% rename from image_classification/MAE/droppath.py rename to self_supervised_learning/MAE/droppath.py index 25b8d5ff..b32f7310 100644 --- a/image_classification/MAE/droppath.py +++ b/self_supervised_learning/MAE/droppath.py @@ -15,7 +15,6 @@ """ Droppath, reimplement from https://github.com/yueatsprograms/Stochastic_Depth """ - import paddle import paddle.nn as nn @@ -23,7 +22,7 @@ class DropPath(nn.Layer): """DropPath class""" def __init__(self, drop_prob=None): - super(DropPath, self).__init__() + super().__init__() self.drop_prob = drop_prob def drop_path(self, inputs): @@ -43,7 +42,7 @@ def drop_path(self, inputs): shape = (inputs.shape[0], ) + (1, ) * (inputs.ndim - 1) # shape=(N, 1, 1, 1) random_tensor = keep_prob + paddle.rand(shape, dtype=inputs.dtype) random_tensor = random_tensor.floor() # mask - output = inputs.divide(keep_prob) * random_tensor #divide is to keep same output expectation + output = inputs.divide(keep_prob) * random_tensor # divide to keep same output expectation return output def forward(self, inputs): diff --git a/image_classification/MAE/load_pytorch_weights.py b/self_supervised_learning/MAE/load_pytorch_weights.py similarity index 98% rename from image_classification/MAE/load_pytorch_weights.py rename to self_supervised_learning/MAE/load_pytorch_weights.py index 18e0fd86..28a118ca 100644 --- a/image_classification/MAE/load_pytorch_weights.py +++ b/self_supervised_learning/MAE/load_pytorch_weights.py @@ -30,9 +30,9 @@ random.seed(seed) -#model_type = 'base' +model_type = 'base' #model_type = 'large' -model_type = 'huge' +#model_type = 'huge' if model_type == 'base': model_name = 'mae_vit_base_patch16' @@ -213,6 +213,7 @@ def main(): noise = torch.Tensor(rp) # encoder out + # NOTE: need to modify the mae pytorch implementation out_torch = torch_model.forward_encoder(x_torch, 0.75, noise)[0] out_paddle = paddle_model.forward_encoder(x_paddle, 0.75, rand_probs)[0] diff --git a/image_classification/MAE/load_pytorch_weights_finetune.py b/self_supervised_learning/MAE/load_pytorch_weights_finetune.py similarity index 96% rename from image_classification/MAE/load_pytorch_weights_finetune.py rename to self_supervised_learning/MAE/load_pytorch_weights_finetune.py index 78ac7ceb..db8346d6 100644 --- a/image_classification/MAE/load_pytorch_weights_finetune.py +++ b/self_supervised_learning/MAE/load_pytorch_weights_finetune.py @@ -22,14 +22,14 @@ from config import * ## vit-base -#model_path='./mae_finetuned_vit_base' -#model_name = 'vit_base_patch16' +model_path='./mae_finetuned_vit_base' +model_name = 'vit_base_patch16' config = 
get_config(f'./configs/vit_base_patch16_224_finetune.yaml') # vit-large -model_path='./mae_finetuned_vit_large' -model_name = 'vit_large_patch16' -config = get_config(f'./configs/vit_large_patch16_224_finetune.yaml') +#model_path='./mae_finetuned_vit_large' +#model_name = 'vit_large_patch16' +#config = get_config(f'./configs/vit_large_patch16_224_finetune.yaml') # vit-huge #model_path='./mae_finetuned_vit_huge' diff --git a/image_classification/MAE/losses.py b/self_supervised_learning/MAE/losses.py similarity index 93% rename from image_classification/MAE/losses.py rename to self_supervised_learning/MAE/losses.py index 082467a3..674d6b41 100644 --- a/image_classification/MAE/losses.py +++ b/self_supervised_learning/MAE/losses.py @@ -54,9 +54,6 @@ class SoftTargetCrossEntropyLoss(nn.Layer): Returns: loss: float, the mean loss value """ - def __init__(self): - super().__init__() - def forward(self, x, target): loss = paddle.sum(-target * F.log_softmax(x, axis=-1), axis=-1) return loss.mean() @@ -64,16 +61,16 @@ def forward(self, x, target): class DistillationLoss(nn.Layer): """Distillation loss function - This layer includes the orginal loss (criterion) and a extra - distillation loss (criterion), which computes the loss with - different type options, between current model and + This layer includes the original loss (criterion) and an extra + distillation loss (criterion), which computes the loss with + different type options, between the current model and a teacher model as its supervision. Args: base_criterion: nn.Layer, the original criterion teacher_model: nn.Layer, the teacher model as supervision distillation_type: str, one of ['none', 'soft', 'hard'] - alpha: float, ratio of base loss (* (1-alpha)) + alpha: float, ratio of base loss (* (1-alpha)) and distillation loss( * alpha) tao: float, temperature in distillation """ @@ -101,7 +98,9 @@ def forward(self, inputs, outputs, targets): in the last layer of the model targets: tensor, the labels for the base criterion """ - outputs, outputs_kd = outputs[0], outputs[1] + outputs_kd = None + if not isinstance(outputs, paddle.Tensor): + outputs, outputs_kd = outputs[0], outputs[1] base_loss = self.base_criterion(outputs, targets) if self.type == 'none': return base_loss @@ -119,5 +118,3 @@ def forward(self, inputs, outputs, targets): loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha return loss - - diff --git a/image_classification/MAE/lr_decay.py b/self_supervised_learning/MAE/lr_decay.py similarity index 100% rename from image_classification/MAE/lr_decay.py rename to self_supervised_learning/MAE/lr_decay.py diff --git a/image_classification/MAE/mae.png b/self_supervised_learning/MAE/mae.png similarity index 100% rename from image_classification/MAE/mae.png rename to self_supervised_learning/MAE/mae.png diff --git a/image_classification/MAE/main_multi_gpu_finetune.py b/self_supervised_learning/MAE/main_multi_gpu_finetune.py similarity index 76% rename from image_classification/MAE/main_multi_gpu_finetune.py rename to self_supervised_learning/MAE/main_multi_gpu_finetune.py index d887af3d..77d66666 100644 --- a/image_classification/MAE/main_multi_gpu_finetune.py +++ b/self_supervised_learning/MAE/main_multi_gpu_finetune.py @@ -17,45 +17,42 @@ import sys import os import time -import logging import argparse import random import math import numpy as np import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import paddle.distributed as dist from paddle.distributed import fleet from datasets import get_dataloader
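The SoftTargetCrossEntropyLoss kept above is what the finetune script pairs with Mixup/CutMix soft labels; a small sketch of the computation (shapes are arbitrary):

import paddle
import paddle.nn.functional as F
from losses import SoftTargetCrossEntropyLoss

logits = paddle.randn([8, 1000])                            # model outputs
soft_target = F.softmax(paddle.randn([8, 1000]), axis=-1)   # e.g. mixed labels produced by Mixup
criterion = SoftTargetCrossEntropyLoss()
loss = criterion(logits, soft_target)
# equivalent to: paddle.sum(-soft_target * F.log_softmax(logits, axis=-1), axis=-1).mean()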
from datasets import get_dataset -from mixup import Mixup -from losses import LabelSmoothingCrossEntropyLoss -from losses import SoftTargetCrossEntropyLoss -from transformer import build_transformer as build_model +from config import get_config +from config import update_config from utils import AverageMeter -from utils import WarmupCosineScheduler -from utils import get_exclude_from_weight_decay_fn +from utils import get_logger +from utils import write_log +from utils import all_reduce_mean +from utils import skip_weight_decay_fn from utils import get_params_groups -from utils import cosine_scheduler from utils import adjust_learning_rate +from mixup import Mixup +from losses import LabelSmoothingCrossEntropyLoss +from losses import SoftTargetCrossEntropyLoss from utils import interpolate_pos_embed import lr_decay -from config import get_config -from config import update_config +from transformer import build_transformer as build_model import paddlenlp def get_arguments(): - """return argumeents, this will overwrite the config after loading yaml file""" - parser = argparse.ArgumentParser('MAE') + """return arguments, this will overwrite the config by (1) yaml file (2) argument values""" + parser = argparse.ArgumentParser('MAE Finetune') parser.add_argument('-cfg', type=str, default=None) parser.add_argument('-dataset', type=str, default=None) - parser.add_argument('-batch_size', type=int, default=None) - parser.add_argument('-image_size', type=int, default=None) parser.add_argument('-data_path', type=str, default=None) parser.add_argument('-output', type=str, default=None) - parser.add_argument('-ngpus', type=int, default=None) + parser.add_argument('-batch_size', type=int, default=None) + parser.add_argument('-batch_size_eval', type=int, default=None) + parser.add_argument('-image_size', type=int, default=None) parser.add_argument('-accum_iter', type=int, default=None) parser.add_argument('-pretrained', type=str, default=None) parser.add_argument('-resume', type=str, default=None) @@ -66,99 +63,11 @@ def get_arguments(): return arguments -def get_logger(file_path): - """Set logging file and format, logs are written in 2 loggers, one local_logger records - the information on its own gpu/process, one master_logger records the overall/average - information over all gpus/processes.
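The per-script logger and reduction helpers deleted below now live in utils.py and are imported above; a sketch of what utils.all_reduce_mean presumably looks like, mirroring the helper removed in this patch (assumption: the utils version is equivalent):

import paddle
import paddle.distributed as dist

def all_reduce_mean(x):
    """Average a python scalar across all ranks; no-op on a single GPU."""
    world_size = dist.get_world_size()
    if world_size > 1:
        x_reduce = paddle.to_tensor(x)
        dist.all_reduce(x_reduce)
        x_reduce = x_reduce / world_size
        return x_reduce.item()
    return x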
- Args: - file_path: str, folder path of the logger files to write - Return: - local_logger: python logger for each process - master_logger: python logger for overall processes (on node 0) - """ - local_rank = dist.get_rank() - filename = os.path.join(file_path, f'log_all.txt') - log_format = "%(asctime)s %(message)s" - logging.basicConfig(filename=filename, level=logging.INFO, - format=log_format, datefmt="%m%d %I:%M:%S %p") - - # local_logger for each process/GPU - local_logger = logging.getLogger(f'local_{local_rank}') - filename = os.path.join(file_path, f'log_{local_rank}.txt') - fh = logging.FileHandler(filename) - fh.setFormatter(logging.Formatter(log_format)) - local_logger.addHandler(fh) - ## console - #sh = logging.StreamHandler(sys.stdout) - #sh.setFormatter(logging.Formatter(log_format)) - #local_logger.addHandler(sh) - - # master_logger records avg performance - if local_rank == 0: - master_logger = logging.getLogger('master') - # log.txt - filename = os.path.join(file_path, f'log.txt') - fh = logging.FileHandler(filename) - fh.setFormatter(logging.Formatter(log_format)) - master_logger.addHandler(fh) - # console - sh = logging.StreamHandler(sys.stdout) - sh.setFormatter(logging.Formatter(log_format)) - master_logger.addHandler(sh) - else: - master_logger = None - return local_logger, master_logger - - -def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): - """Write messages in loggers - Args: - local_logger: python logger, logs information on single gpu - master_logger: python logger, logs information over all gpus - msg_local: str, message to log on local_logger - msg_master: str, message to log on master_logger, if None, use msg_local, default: None - level: str, log level, in ['info', 'warning', 'fatal'], default: 'info' - """ - # write log to local logger - if local_logger: - if level == 'info': - local_logger.info(msg_local) - elif level == 'warning': - local_logger.warning(msg_local) - elif level == 'fatal': - local_logger.fatal(msg_local) - else: - raise ValueError("level must in ['info', 'warning', 'fatal']") - # write log to master logger on node 0 - if master_logger and dist.get_rank() == 0: - if msg_master is None: - msg_master = msg_local - if level == 'info': - master_logger.info("MASTER_LOG " + msg_master) - elif level == 'warning': - master_logger.warning("MASTER_LOG " + msg_master) - elif level == 'fatal': - master_logger.fatal("MASTER_LOG " + msg_master) - else: - raise ValueError("level must in ['info', 'warning', 'fatal']") - - -def all_reduce_mean(x): - """perform all_reduce on Tensor""" - world_size = dist.get_world_size() - if world_size > 1: - x_reduce = paddle.to_tensor(x) - dist.all_reduce(x_reduce) - x_reduce = x_reduce / world_size - return x_reduce.item() - else: - return x - - def train(dataloader, model, optimizer, criterion, + lr_scheduler, base_lr, min_lr, epoch, @@ -216,12 +125,13 @@ def train(dataloader, images, label = mixup_fn(images, label_orig) if batch_id % accum_iter == 0: - adjust_learning_rate(optimizer, - base_lr, - min_lr, - batch_id / total_batches + epoch - 1, - warmup_epochs, - total_epochs) + lr_scheduler.step(batch_id / total_batches + epoch -1) + #adjust_learning_rate(optimizer, + # base_lr, + # min_lr, + # batch_id / total_batches + epoch - 1, + # warmup_epochs, + # total_epochs) # forward with paddle.amp.auto_cast(amp_grad_scaler is not None): output = model(images) @@ -233,7 +143,7 @@ def train(dataloader, sys.exit(1) loss = loss / accum_iter - + # backward and step if amp_grad_scaler is 
None: # fp32 loss.backward() @@ -244,12 +154,12 @@ def train(dataloader, scaled_loss = amp_grad_scaler.scale(loss) scaled_loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 + # amp for param group reference: https://github.com/PaddlePaddle/Paddle/issues/37188 amp_grad_scaler.step(optimizer) amp_grad_scaler.update() optimizer.clear_grad() - pred = F.softmax(output) + pred = paddle.nn.functional.softmax(output) if mixup_fn: acc = paddle.metric.accuracy(pred, label_orig).item() else: @@ -269,7 +179,7 @@ def train(dataloader, if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader): general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " f"Step[{batch_id:04d}/{total_batches:04d}], " - f"Lr: {optimizer.get_lr():04f}, ") + f"Lr: {optimizer.get_lr():.6e}, ") local_message = (general_message + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), " f"Avg Acc: {train_acc_meter.avg:.4f}") @@ -279,7 +189,7 @@ def train(dataloader, write_log(local_logger, master_logger, local_message, master_message) train_time = time.time() - time_st - dist.barrier() + paddle.distributed.barrier() return (train_loss_meter.avg, train_acc_meter.avg, master_loss_meter.avg, @@ -332,7 +242,7 @@ def validate(dataloader, loss = criterion(output, label) loss_value = loss.item() - pred = F.softmax(output) + pred = paddle.nn.functional.softmax(output) acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item() acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item() @@ -359,7 +269,7 @@ def validate(dataloader, f"Avg Acc@1: {master_acc1_meter.avg:.4f}, " + f"Avg Acc@5: {master_acc5_meter.avg:.4f}") write_log(local_logger, master_logger, local_message, master_message) - dist.barrier() + paddle.distributed.barrier() val_time = time.time() - time_st return (val_loss_meter.avg, val_acc1_meter.avg, @@ -371,10 +281,12 @@ def validate(dataloader, def main_worker(*args): + """main method for each process""" # STEP 0: Preparation - #dist.init_parallel_env() - world_size = dist.get_world_size() - local_rank = dist.get_rank() + paddle.device.set_device('gpu') + #paddle.distributed.init_parallel_env() + world_size = paddle.distributed.get_world_size() + local_rank = paddle.distributed.get_rank() config = args[0] last_epoch = config.TRAIN.LAST_EPOCH seed = config.SEED + local_rank @@ -383,13 +295,13 @@ def main_worker(*args): random.seed(seed) # logger for each process/gpu local_logger, master_logger = get_logger(config.SAVE) - message = f'----- world_size = {world_size}, local_rank = {local_rank}' + message = (f'----- world_size = {world_size}, local_rank = {local_rank} \n' + f'----- {config}') write_log(local_logger, master_logger, message) - + # STEP 1: Create model - paddle.device.set_device('gpu') model = build_model(config) - if dist.get_world_size() > 1: + if paddle.distributed.get_world_size() > 1: strategy = fleet.DistributedStrategy() ## Hybrid Parallel Training strategy.hybrid_configs = {} @@ -398,13 +310,13 @@ def main_worker(*args): # STEP 2: Create train and val dataloader if not config.EVAL: dataset_train = args[1] - dataloader_train = get_dataloader(config, dataset_train, 'train', True) + dataloader_train = get_dataloader(config, dataset_train, True, True) total_batch_train = len(dataloader_train) message = f'----- Total # of train batch (single gpu): {total_batch_train}' write_log(local_logger, master_logger, message) dataset_val = args[2] - dataloader_val =
get_dataloader(config, dataset_val, 'val', True) + dataloader_val = get_dataloader(config, dataset_val, False, True) total_batch_val = len(dataloader_val) message = f'----- Total # of val batch (single gpu): {total_batch_val}' write_log(local_logger, master_logger, message) @@ -426,9 +338,9 @@ def main_worker(*args): elif config.TRAIN.SMOOTHING: criterion = LabelSmoothingCrossEntropyLoss() else: - criterion = nn.CrossEntropyLoss() + criterion = paddle.nn.CrossEntropyLoss() # only use cross entropy for val - criterion_val = nn.CrossEntropyLoss() + criterion_val = paddle.nn.CrossEntropyLoss() # STEP 5: Define optimizer and lr_scheduler # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) @@ -441,25 +353,43 @@ def main_worker(*args): write_log(local_logger, master_logger, f'Base lr is scaled to: {config.TRAIN.BASE_LR}') # define scaler for amp training - if config.AMP: - amp_grad_scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 - else: - amp_grad_scaler = None + amp_grad_scaler = paddle.amp.GradScaler() if config.AMP else None # set gradient clip if config.TRAIN.GRAD_CLIP: clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) else: clip = None # set optimizer + # create warmup and cosine decay lr scheduler + if config.TRAIN.WARMUP_EPOCHS > 0: + cosine_lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=config.TRAIN.BASE_LR, + T_max=config.TRAIN.NUM_EPOCHS - config.TRAIN.WARMUP_EPOCHS, + eta_min=config.TRAIN.END_LR, + last_epoch=-1) # do not set last epoch, handled in warmup sched get_lr() + lr_scheduler = paddle.optimizer.lr.LinearWarmup( + learning_rate=cosine_lr_scheduler, # use cosine lr sched after warmup + warmup_steps=config.TRAIN.WARMUP_EPOCHS, # only supports a positive integer + start_lr=config.TRAIN.WARMUP_START_LR, + end_lr=config.TRAIN.BASE_LR, + last_epoch=config.TRAIN.LAST_EPOCH) + else: # create cosine decay lr scheduler if no warmup epochs + lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=config.TRAIN.BASE_LR, + T_max=config.TRAIN.NUM_EPOCHS, + eta_min=config.TRAIN.END_LR, + last_epoch=config.TRAIN.LAST_EPOCH) + if config.TRAIN.OPTIMIZER.NAME == "AdamW": params_groups = lr_decay.param_groups_lrd( - model=model, + model=model, no_weight_decay_list=['encoder_position_embedding', 'cls_token'], weight_decay=config.TRAIN.WEIGHT_DECAY, layer_decay=config.TRAIN.LAYER_DECAY) + optimizer = paddle.optimizer.AdamW( parameters=params_groups, - learning_rate=config.TRAIN.BASE_LR, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + learning_rate=lr_scheduler, # currently only warmup + cosine is supported beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], weight_decay=config.TRAIN.WEIGHT_DECAY, # set by params_groups, this vaule is not effectitve @@ -467,24 +397,20 @@ def main_worker(*args): grad_clip=clip) elif config.TRAIN.OPTIMIZER.NAME == "AdamWDL": name_dict = dict() - wd_exclude_list = ['encoder_position_embedding', 'cls_token'] for n, p in model.named_parameters(): # name_dict is for AdamWDL argument 'name_dict' name_dict[p.name] = n - # add no decay param name to weight exclude list, for AramWDL argument 'apply_decay_param_fn' - if p.ndim == 1 or n.endswith('.bias'): - wd_exclude_list.append(n) - #print('no_decay param names: ', wd_exclude_list) - optimizer = paddlenlp.ops.optimizer.AdamWDL( - learning_rate=config.TRAIN.BASE_LR, + learning_rate=lr_scheduler, weight_decay=config.TRAIN.WEIGHT_DECAY,
layerwise_decay=config.TRAIN.LAYER_DECAY, - n_layers=config.MODEL.TRANS.ENCODER.DEPTH, + n_layers=config.MODEL.ENCODER.DEPTH, set_param_lr_fun=lr_decay.lr_setting, parameters=model.parameters(), name_dict=name_dict, - apply_decay_param_fun=get_exclude_from_weight_decay_fn(wd_exclude_list), + apply_decay_param_fun=skip_weight_decay_fn( + model, # skip bn and bias in model + ['encoder_position_embedding', 'cls_token']), # skip custom ops beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], epsilon=config.TRAIN.OPTIMIZER.EPS, @@ -518,6 +444,10 @@ def main_worker(*args): interpolate_pos_embed(model, model_state) model.set_state_dict(model_state) + # set fc layer initialization (follow official code) + init_fn = paddle.nn.initializer.TruncatedNormal(std=0.02) + init_fn(model.classifier.weight) + message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" write_log(local_logger, master_logger, message) @@ -526,23 +456,28 @@ def main_worker(*args): model_state = paddle.load(config.MODEL.RESUME) if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch model.set_state_dict(model_state['model']) - if 'optimizer' in model_state and 'epoch' in model_state: + if 'optimizer' in model_state: optimizer.set_state_dict(model_state['optimizer']) - # last_epoch = 1 means training from epoch 2 (1 + 1) - config.TRAIN.LAST_EPOCH = model_state['epoch'] + 1 + if 'epoch' in model_state: + config.TRAIN.LAST_EPOCH = model_state['epoch'] + if 'lr_scheduler' in model_state and lr_scheduler is not None: + lr_scheduler.set_state_dict(model_state['lr_scheduler']) if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None: amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler']) + lr_scheduler.step(config.TRAIN.LAST_EPOCH) message = (f"----- Resume Training: Load model from {config.MODEL.RESUME}, " f"opt = [{'optimizer' in model_state}], " + f"lr_scheduler = [{'lr_scheduler' in model_state}], " + f"model_ema = [{'model_ema' in model_state}], " f"epoch = [{model_state.get('epoch', -1)}], " f"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]") write_log(local_logger, master_logger, message) else: # direct load pdparams without other items - message = f"----- Resume Training: Load model from {config.MODEL.RESUME}, no opt, epoch, or scaler is set!"
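Putting the finetune optimizer pieces together: a LinearWarmup wrapper drives a cosine decay, and lr_decay.param_groups_lrd supplies layer-wise decayed, weight-decay-filtered parameter groups to AdamW. A minimal sketch with illustrative hyper-parameters (the numeric values are assumptions, not taken from any yaml in this patch):

import paddle
import lr_decay
from config import get_config
from transformer import build_transformer as build_model

config = get_config('./configs/vit_base_patch16_224_finetune.yaml')  # example config
model = build_model(config)

cosine = paddle.optimizer.lr.CosineAnnealingDecay(
    learning_rate=5e-4, T_max=100 - 5, eta_min=1e-6)       # base_lr / decay epochs / end_lr
lr_scheduler = paddle.optimizer.lr.LinearWarmup(
    learning_rate=cosine, warmup_steps=5, start_lr=1e-6, end_lr=5e-4)

# groups earlier transformer blocks with smaller lrs and removes weight decay
# from the position embedding / cls_token, as in the AdamW branch above
param_groups = lr_decay.param_groups_lrd(
    model=model,
    no_weight_decay_list=['encoder_position_embedding', 'cls_token'],
    weight_decay=0.05,
    layer_decay=0.65)
optimizer = paddle.optimizer.AdamW(
    parameters=param_groups,
    learning_rate=lr_scheduler,
    beta1=0.9, beta2=0.95, epsilon=1e-8)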
+ message = f"----- Resume Training: Load from {config.MODEL.RESUME}, no opt/epoch/scaler" write_log(local_logger, master_logger, message, 'warning') - model.set_dict(model_state) + model.set_state_dict(model_state) - if dist.get_world_size() > 1: + if paddle.distributed.get_world_size() > 1: model = fleet.distributed_model(model) # STEP 7: Validation (eval mode) @@ -583,6 +518,7 @@ def main_worker(*args): model=model, optimizer=optimizer, criterion=criterion, + lr_scheduler=lr_scheduler, base_lr=config.TRAIN.BASE_LR, min_lr=config.TRAIN.END_LR, epoch=epoch, @@ -597,7 +533,7 @@ def main_worker(*args): master_logger=master_logger) general_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " - f"Lr: {optimizer.get_lr():.4f}, " + f"Lr: {optimizer.get_lr():.6e}, " f"time: {train_time:.2f}") local_message = (general_message + @@ -644,34 +580,30 @@ def main_worker(*args): state_dict['optimizer'] = optimizer.state_dict() state_dict['epoch'] = epoch if amp_grad_scaler is not None: - state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict() + state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict() + if lr_scheduler is not None: + state_dict['lr_scheduler'] = lr_scheduler.state_dict() paddle.save(state_dict, model_path) message = (f"----- Save model: {model_path}") write_log(local_logger, master_logger, message) def main(): - # config is updated by: (1) config.py, (2) yaml file, (3) arguments - arguments = get_arguments() - config = get_config() - config = update_config(config, arguments) + # config is updated in order: (1) default in config.py, (2) yaml file, (3) arguments + config = update_config(get_config(), get_arguments()) + # set output folder - if not config.EVAL: - config.SAVE = '{}/finetuning-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) - else: - config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) + config.SAVE = os.path.join(config.SAVE, + f"{'eval' if config.EVAL else 'finetune'}-{time.strftime('%Y%m%d-%H-%M')}") if not os.path.exists(config.SAVE): os.makedirs(config.SAVE, exist_ok=True) - # get train dataset if in train mode - if config.EVAL: - dataset_train = None - else: - dataset_train = get_dataset(config, mode='train') - # get val dataset - dataset_val = get_dataset(config, mode='val') - # start training - #config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS - #dist.spawn(main_worker, args=(config, dataset_train, dataset_val), nprocs=config.NGPUS) + + # get train dataset if in train mode and val dataset + dataset_train = get_dataset(config, is_train=True) if not config.EVAL else None + dataset_val = get_dataset(config, is_train=False) + + # dist spawn lunch: use CUDA_VISIBLE_DEVICES to set available gpus + #paddle.distributed.spawn(main_worker, args=(config, dataset_train, dataset_val)) main_worker(config, dataset_train, dataset_val) diff --git a/image_classification/MAE/main_multi_gpu_linearprobe.py b/self_supervised_learning/MAE/main_multi_gpu_linearprobe.py similarity index 75% rename from image_classification/MAE/main_multi_gpu_linearprobe.py rename to self_supervised_learning/MAE/main_multi_gpu_linearprobe.py index a8a09ff1..8b30c4c6 100644 --- a/image_classification/MAE/main_multi_gpu_linearprobe.py +++ b/self_supervised_learning/MAE/main_multi_gpu_linearprobe.py @@ -17,45 +17,42 @@ import sys import os import time -import logging import argparse import random import math import numpy as np import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import 
paddle.distributed as dist from paddle.distributed import fleet from datasets import get_dataloader from datasets import get_dataset -from mixup import Mixup -from losses import LabelSmoothingCrossEntropyLoss -from losses import SoftTargetCrossEntropyLoss -from transformer import build_transformer as build_model +from config import get_config +from config import update_config from utils import AverageMeter -from utils import WarmupCosineScheduler -from utils import get_exclude_from_weight_decay_fn +from utils import get_logger +from utils import write_log +from utils import all_reduce_mean +from utils import skip_weight_decay_fn from utils import get_params_groups -from utils import cosine_scheduler from utils import adjust_learning_rate +from mixup import Mixup +from losses import LabelSmoothingCrossEntropyLoss +from losses import SoftTargetCrossEntropyLoss from utils import interpolate_pos_embed import lr_decay -from config import get_config -from config import update_config +from transformer import build_transformer as build_model import paddlenlp def get_arguments(): - """return argumeents, this will overwrite the config after loading yaml file""" - parser = argparse.ArgumentParser('MAE') + """return argumeents, this will overwrite the config by (1) yaml file (2) argument values""" + parser = argparse.ArgumentParser('MAE Linearprobe') parser.add_argument('-cfg', type=str, default=None) parser.add_argument('-dataset', type=str, default=None) - parser.add_argument('-batch_size', type=int, default=None) - parser.add_argument('-image_size', type=int, default=None) parser.add_argument('-data_path', type=str, default=None) parser.add_argument('-output', type=str, default=None) - parser.add_argument('-ngpus', type=int, default=None) + parser.add_argument('-batch_size', type=int, default=None) + parser.add_argument('-batch_size_eval', type=int, default=None) + parser.add_argument('-image_size', type=int, default=None) parser.add_argument('-accum_iter', type=int, default=None) parser.add_argument('-pretrained', type=str, default=None) parser.add_argument('-resume', type=str, default=None) @@ -66,99 +63,11 @@ def get_arguments(): return arguments -def get_logger(file_path): - """Set logging file and format, logs are written in 2 loggers, one local_logger records - the information on its own gpu/process, one master_logger records the overall/average - information over all gpus/processes. 
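The checkpoints written by these scripts bundle model, optimizer, epoch, lr_scheduler and (optionally) the AMP scaler; a sketch of the save/resume round trip used in the finetune script above and in the linear-probe script below, wrapped as helpers for clarity (the helper functions themselves are illustrative, not part of this patch):

import paddle

def save_checkpoint(path, model, optimizer, epoch, lr_scheduler=None, amp_grad_scaler=None):
    state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
    if lr_scheduler is not None:
        state['lr_scheduler'] = lr_scheduler.state_dict()
    if amp_grad_scaler is not None:
        state['amp_grad_scaler'] = amp_grad_scaler.state_dict()
    paddle.save(state, path)

def resume_checkpoint(path, model, optimizer, lr_scheduler=None, amp_grad_scaler=None):
    state = paddle.load(path)
    model.set_state_dict(state['model'])
    optimizer.set_state_dict(state['optimizer'])
    if lr_scheduler is not None and 'lr_scheduler' in state:
        lr_scheduler.set_state_dict(state['lr_scheduler'])
        lr_scheduler.step(state.get('epoch', 0))  # fast-forward the schedule to the resumed epoch
    if amp_grad_scaler is not None and 'amp_grad_scaler' in state:
        amp_grad_scaler.load_state_dict(state['amp_grad_scaler'])
    return state.get('epoch', 0)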
- Args: - file_path: str, folder path of the logger files to write - Return: - local_logger: python logger for each process - master_logger: python logger for overall processes (on node 0) - """ - local_rank = dist.get_rank() - filename = os.path.join(file_path, f'log_all.txt') - log_format = "%(asctime)s %(message)s" - logging.basicConfig(filename=filename, level=logging.INFO, - format=log_format, datefmt="%m%d %I:%M:%S %p") - - # local_logger for each process/GPU - local_logger = logging.getLogger(f'local_{local_rank}') - filename = os.path.join(file_path, f'log_{local_rank}.txt') - fh = logging.FileHandler(filename) - fh.setFormatter(logging.Formatter(log_format)) - local_logger.addHandler(fh) - ## console - #sh = logging.StreamHandler(sys.stdout) - #sh.setFormatter(logging.Formatter(log_format)) - #local_logger.addHandler(sh) - - # master_logger records avg performance - if local_rank == 0: - master_logger = logging.getLogger('master') - # log.txt - filename = os.path.join(file_path, f'log.txt') - fh = logging.FileHandler(filename) - fh.setFormatter(logging.Formatter(log_format)) - master_logger.addHandler(fh) - # console - sh = logging.StreamHandler(sys.stdout) - sh.setFormatter(logging.Formatter(log_format)) - master_logger.addHandler(sh) - else: - master_logger = None - return local_logger, master_logger - - -def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): - """Write messages in loggers - Args: - local_logger: python logger, logs information on single gpu - master_logger: python logger, logs information over all gpus - msg_local: str, message to log on local_logger - msg_master: str, message to log on master_logger, if None, use msg_local, default: None - level: str, log level, in ['info', 'warning', 'fatal'], default: 'info' - """ - # write log to local logger - if local_logger: - if level == 'info': - local_logger.info(msg_local) - elif level == 'warning': - local_logger.warning(msg_local) - elif level == 'fatal': - local_logger.fatal(msg_local) - else: - raise ValueError("level must in ['info', 'warning', 'fatal']") - # write log to master logger on node 0 - if master_logger and dist.get_rank() == 0: - if msg_master is None: - msg_master = msg_local - if level == 'info': - master_logger.info("MASTER_LOG " + msg_master) - elif level == 'warning': - master_logger.warning("MASTER_LOG " + msg_master) - elif level == 'fatal': - master_logger.fatal("MASTER_LOG " + msg_master) - else: - raise ValueError("level must in ['info', 'warning', 'fatal']") - - -def all_reduce_mean(x): - """perform all_reduce on Tensor""" - world_size = dist.get_world_size() - if world_size > 1: - x_reduce = paddle.to_tensor(x) - dist.all_reduce(x_reduce) - x_reduce = x_reduce / world_size - return x_reduce.item() - else: - return x - - def train(dataloader, model, optimizer, criterion, + lr_scheduler, base_lr, min_lr, epoch, @@ -216,12 +125,13 @@ def train(dataloader, images, label = mixup_fn(images, label_orig) if batch_id % accum_iter == 0: - adjust_learning_rate(optimizer, - base_lr, - min_lr, - batch_id / total_batches + epoch - 1, - warmup_epochs, - total_epochs) + lr_scheduler.step(batch_id / total_batches + epoch -1) + #adjust_learning_rate(optimizer, + # base_lr, + # min_lr, + # batch_id / total_batches + epoch - 1, + # warmup_epochs, + # total_epochs) # forward with paddle.amp.auto_cast(amp_grad_scaler is not None): output = model(images) @@ -233,7 +143,7 @@ def train(dataloader, sys.exit(1) loss = loss / accum_iter - + # backward and step if amp_grad_scaler is 
None: # fp32 loss.backward() @@ -244,12 +154,12 @@ def train(dataloader, scaled_loss = amp_grad_scaler.scale(loss) scaled_loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 + # amp for param group reference: https://github.com/PaddlePaddle/Paddle/issues/37188 amp_grad_scaler.step(optimizer) amp_grad_scaler.update() optimizer.clear_grad() - pred = F.softmax(output) + pred = paddle.nn.functional.softmax(output) if mixup_fn: acc = paddle.metric.accuracy(pred, label_orig).item() else: @@ -269,7 +179,7 @@ def train(dataloader, if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader): general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " f"Step[{batch_id:04d}/{total_batches:04d}], " - f"Lr: {optimizer.get_lr():04f}, ") + f"Lr: {optimizer.get_lr():.6e}, ") local_message = (general_message + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), " f"Avg Acc: {train_acc_meter.avg:.4f}") @@ -279,7 +189,7 @@ def train(dataloader, write_log(local_logger, master_logger, local_message, master_message) train_time = time.time() - time_st - dist.barrier() + paddle.distributed.barrier() return (train_loss_meter.avg, train_acc_meter.avg, master_loss_meter.avg, @@ -332,7 +242,7 @@ def validate(dataloader, loss = criterion(output, label) loss_value = loss.item() - pred = F.softmax(output) + pred = paddle.nn.functional.softmax(output) acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item() acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item() @@ -359,7 +269,7 @@ def validate(dataloader, f"Avg Acc@1: {master_acc1_meter.avg:.4f}, " + f"Avg Acc@5: {master_acc5_meter.avg:.4f}") write_log(local_logger, master_logger, local_message, master_message) - dist.barrier() + paddle.distributed.barrier() val_time = time.time() - time_st return (val_loss_meter.avg, val_acc1_meter.avg, @@ -371,10 +281,12 @@ def validate(dataloader, def main_worker(*args): + """main method for each process""" # STEP 0: Preparation - #dist.init_parallel_env() - world_size = dist.get_world_size() - local_rank = dist.get_rank() + paddle.device.set_device('gpu') + #paddle.distributed.init_parallel_env() + world_size = paddle.distributed.get_world_size() + local_rank = paddle.distributed.get_rank() config = args[0] last_epoch = config.TRAIN.LAST_EPOCH seed = config.SEED + local_rank @@ -383,13 +295,26 @@ def main_worker(*args): random.seed(seed) # logger for each process/gpu local_logger, master_logger = get_logger(config.SAVE) - message = f'----- world_size = {world_size}, local_rank = {local_rank}' + message = (f'----- world_size = {world_size}, local_rank = {local_rank} \n' + f'----- {config}') write_log(local_logger, master_logger, message) - + # STEP 1: Create model - paddle.device.set_device('gpu') model = build_model(config) - if dist.get_world_size() > 1: + # for linear prob: add bn1d to classifier layer + model.classifier = paddle.nn.Sequential( + paddle.nn.BatchNorm1D(model.classifier.weight.shape[0], weight_attr=False, bias_attr=False, epsilon=1e-6), + model.classifier) + # freeze all but the classifier + for _, p in model.named_parameters(): + p.stop_gradient = True + for _, p in model.classifier.named_parameters(): + p.stop_gradient = False + + for n, p in model.named_parameters(): + print(n, p.shape, p.stop_gradient) + + if paddle.distributed.get_world_size() > 1: strategy = fleet.DistributedStrategy() # lars if config.TRAIN.OPTIMIZER.NAME == "LARS": @@ -397,9 +322,8 @@ 
def main_worker(*args): strategy.lars_configs = { "lars_coeff": 0.001, "lars_weight_decay": config.TRAIN.WEIGHT_DECAY, - "exclude_from_weight_decay": ['classifier.0._mean', 'classifier.0._variance'] + "exclude_from_weight_decay": ['cls_token', 'encoder_position_embedding', 'classifier.0', 'classifier.1.bias'] } - ## Hybrid Parallel Training strategy.hybrid_configs = {} fleet.init(is_collective=True, strategy=strategy) @@ -407,19 +331,19 @@ def main_worker(*args): # STEP 2: Create train and val dataloader if not config.EVAL: dataset_train = args[1] - dataloader_train = get_dataloader(config, dataset_train, 'train', True) + dataloader_train = get_dataloader(config, dataset_train, True, True) total_batch_train = len(dataloader_train) message = f'----- Total # of train batch (single gpu): {total_batch_train}' write_log(local_logger, master_logger, message) dataset_val = args[2] - dataloader_val = get_dataloader(config, dataset_val, 'val', True) + dataloader_val = get_dataloader(config, dataset_val, False, True) total_batch_val = len(dataloader_val) message = f'----- Total # of val batch (single gpu): {total_batch_val}' write_log(local_logger, master_logger, message) # STEP 3: Define criterion - criterion = nn.CrossEntropyLoss() + criterion = paddle.nn.CrossEntropyLoss() # STEP 4: Define optimizer and lr_scheduler # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin) @@ -432,25 +356,43 @@ def main_worker(*args): write_log(local_logger, master_logger, f'Base lr is scaled to: {config.TRAIN.BASE_LR}') # define scaler for amp training - if config.AMP: - amp_grad_scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 - else: - amp_grad_scaler = None + amp_grad_scaler = paddle.amp.GradScaler() if config.AMP else None # set gradient clip if config.TRAIN.GRAD_CLIP: clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) else: clip = None # set optimizer + # create warmup and cosine decay lr scheduler + if config.TRAIN.WARMUP_EPOCHS > 0: + cosine_lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=config.TRAIN.BASE_LR, + T_max=config.TRAIN.NUM_EPOCHS - config.TRAIN.WARMUP_EPOCHS, + eta_min=config.TRAIN.END_LR, + last_epoch=-1) # do not set last epoch, handled in warmup sched get_lr() + lr_scheduler = paddle.optimizer.lr.LinearWarmup( + learning_rate=cosine_lr_scheduler, # use cosine lr sched after warmup + warmup_steps=config.TRAIN.WARMUP_EPOCHS, # only support position integet + start_lr=config.TRAIN.WARMUP_START_LR, + end_lr=config.TRAIN.BASE_LR, + last_epoch=config.TRAIN.LAST_EPOCH) + else: # create cosine decay lr scheduler if no warmup epochs + lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=config.TRAIN.BASE_LR, + T_max=config.TRAIN.NUM_EPOCHS, + eta_min=config.TRAIN.END_LR, + last_epoch=config.TRAIN.LAST_EPOCH) + if config.TRAIN.OPTIMIZER.NAME == "AdamW": params_groups = lr_decay.param_groups_lrd( model=model, no_weight_decay_list=['encoder_position_embedding', 'cls_token'], weight_decay=config.TRAIN.WEIGHT_DECAY, layer_decay=config.TRAIN.LAYER_DECAY) + optimizer = paddle.optimizer.AdamW( parameters=params_groups, - learning_rate=config.TRAIN.BASE_LR, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + learning_rate=lr_scheduler, # now only support warmup + cosine beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], weight_decay=config.TRAIN.WEIGHT_DECAY, # set by params_groups, this vaule is not effectitve @@ -458,34 +400,31 @@ def 
main_worker(*args): grad_clip=clip) elif config.TRAIN.OPTIMIZER.NAME == "AdamWDL": name_dict = dict() - wd_exclude_list = ['encoder_position_embedding', 'cls_token'] for n, p in model.named_parameters(): # name_dict is for AdamWDL argument 'name_dict' name_dict[p.name] = n - # add no decay param name to weight exclude list, for AramWDL argument 'apply_decay_param_fn' - if p.ndim == 1 or n.endswith('.bias'): - wd_exclude_list.append(n) - #print('no_decay param names: ', wd_exclude_list) optimizer = paddlenlp.ops.optimizer.AdamWDL( - learning_rate=config.TRAIN.BASE_LR, + learning_rate=lr_scheduler, weight_decay=config.TRAIN.WEIGHT_DECAY, layerwise_decay=config.TRAIN.LAYER_DECAY, - n_layers=config.MODEL.TRANS.ENCODER.DEPTH, + n_layers=config.MODEL.ENCODER.DEPTH, set_param_lr_fun=lr_decay.lr_setting, - parameters=model.parameters(), + parameters=model.classifier.parameters(), name_dict=name_dict, - apply_decay_param_fun=get_exclude_from_weight_decay_fn(wd_exclude_list), + apply_decay_param_fun=skip_weight_decay_fn( + model, # skip bn and bias in model + ['encoder_position_embedding', 'cls_token']), # skip custom ops beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], epsilon=config.TRAIN.OPTIMIZER.EPS, grad_clip=clip) elif config.TRAIN.OPTIMIZER.NAME == "LARS": optimizer = paddle.optimizer.Momentum( - learning_rate=config.TRAIN.BASE_LR, parameters=model.classifier.parameters(), + learning_rate=lr_scheduler, momentum=0.9, - grad_clip=None, + grad_clip=clip, weight_decay=None, # set by fleet lars ) else: @@ -493,6 +432,7 @@ def main_worker(*args): write_log(local_logger, master_logger, message, None, 'fatal') raise NotImplementedError(message) + # STEP 5: Load pretrained model / load resumt model and optimizer states if config.MODEL.PRETRAINED: assert os.path.isfile(config.MODEL.PRETRAINED) is True @@ -520,42 +460,36 @@ def main_worker(*args): message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}" write_log(local_logger, master_logger, message) - # for linear prob: add bn1d to classifier layer - model.classifier = nn.Sequential( - nn.BatchNorm1D(model.classifier.weight.shape[0], weight_attr=False, bias_attr=False, epsilon=1e-6), - model.classifier) - # freeze all but the classifier - for _, p in model.named_parameters(): - p.stop_gradient = True - for _, p in model.classifier.named_parameters(): - p.stop_gradient = False - - for n, p in model.named_parameters(): - print(n, p.shape, p.stop_gradient) - if config.MODEL.RESUME: assert os.path.isfile(config.MODEL.RESUME) is True model_state = paddle.load(config.MODEL.RESUME) if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch model.set_state_dict(model_state['model']) - if 'optimizer' in model_state and 'epoch' in model_state: + if 'optimizer' in model_state: optimizer.set_state_dict(model_state['optimizer']) - # last_epoch = 1 means training from epoch 2 (1 + 1) - config.TRAIN.LAST_EPOCH = model_state['epoch'] + 1 + if 'epoch' in model_state: + config.TRAIN.LAST_EPOCH = model_state['epoch'] + if 'lr_scheduler' in model_state and lr_scheduler is not None: + lr_scheduler.set_state_dict(model_state['lr_scheduler']) if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None: amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler']) + lr_scheduler.step(config.TRAIN.LAST_EPOCH) message = (f"----- Resume Training: Load model from {config.MODEL.RESUME}, " f"opt = [{'optimizer' in model_state}], " + f"lr_scheduler = [{'lr_scheduler' in model_state}], " + 
f"model_ema = [{'model_ema' in model_state}], " f"epoch = [{model_state.get('epoch', -1)}], " f"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]") write_log(local_logger, master_logger, message) else: # direct load pdparams without other items - message = f"----- Resume Training: Load model from {config.MODEL.RESUME}, no opt, epoch, or scaler is set!" + message = f"----- Resume Training: Load from {config.MODEL.RESUME}, no opt/epoch/scaler" write_log(local_logger, master_logger, message, 'warning') - model.set_dict(model_state) + model.set_state_dict(model_state) - if dist.get_world_size() > 1: + if paddle.distributed.get_world_size() > 1: model = fleet.distributed_model(model) + if not config.EVAL: + optimizer = fleet.distributed_optimizer(optimizer) # STEP 7: Validation (eval mode) if config.EVAL: @@ -595,6 +529,7 @@ def main_worker(*args): model=model, optimizer=optimizer, criterion=criterion, + lr_scheduler=lr_scheduler, base_lr=config.TRAIN.BASE_LR, min_lr=config.TRAIN.END_LR, epoch=epoch, @@ -609,7 +544,7 @@ def main_worker(*args): master_logger=master_logger) general_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " - f"Lr: {optimizer.get_lr():.4f}, " + f"Lr: {optimizer.get_lr():.6e}, " f"time: {train_time:.2f}") local_message = (general_message + @@ -656,34 +591,30 @@ def main_worker(*args): state_dict['optimizer'] = optimizer.state_dict() state_dict['epoch'] = epoch if amp_grad_scaler is not None: - state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict() + state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict() + if lr_scheduler is not None: + state_dict['lr_scheduler'] = lr_scheduler.state_dict() paddle.save(state_dict, model_path) message = (f"----- Save model: {model_path}") write_log(local_logger, master_logger, message) def main(): - # config is updated by: (1) config.py, (2) yaml file, (3) arguments - arguments = get_arguments() - config = get_config() - config = update_config(config, arguments) + # config is updated in order: (1) default in config.py, (2) yaml file, (3) arguments + config = update_config(get_config(), get_arguments()) + # set output folder - if not config.EVAL: - config.SAVE = '{}/linearprobing-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) - else: - config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) + config.SAVE = os.path.join(config.SAVE, + f"{'eval' if config.EVAL else 'linearprobe'}-{time.strftime('%Y%m%d-%H-%M')}") if not os.path.exists(config.SAVE): os.makedirs(config.SAVE, exist_ok=True) - # get train dataset if in train mode - if config.EVAL: - dataset_train = None - else: - dataset_train = get_dataset(config, mode='train') - # get val dataset - dataset_val = get_dataset(config, mode='val') - # start training - #config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS - #dist.spawn(main_worker, args=(config, dataset_train, dataset_val), nprocs=config.NGPUS) + + # get train dataset if in train mode and val dataset + dataset_train = get_dataset(config, is_train=True) if not config.EVAL else None + dataset_val = get_dataset(config, is_train=False) + + # dist spawn lunch: use CUDA_VISIBLE_DEVICES to set available gpus + #paddle.distributed.spawn(main_worker, args=(config, dataset_train, dataset_val)) main_worker(config, dataset_train, dataset_val) diff --git a/image_classification/MAE/main_multi_gpu_pretrain.py b/self_supervised_learning/MAE/main_multi_gpu_pretrain.py similarity index 63% rename from image_classification/MAE/main_multi_gpu_pretrain.py 
rename to self_supervised_learning/MAE/main_multi_gpu_pretrain.py index 61580972..31a59041 100644 --- a/image_classification/MAE/main_multi_gpu_pretrain.py +++ b/self_supervised_learning/MAE/main_multi_gpu_pretrain.py @@ -17,38 +17,39 @@ import sys import os import time -import logging import argparse import random import math import numpy as np import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import paddle.distributed as dist from paddle.distributed import fleet from datasets import get_dataloader from datasets import get_dataset -from transformer import build_mae_pretrain as build_model +from config import get_config +from config import update_config from utils import AverageMeter -from utils import get_exclude_from_weight_decay_fn +from utils import get_logger +from utils import write_log +from utils import all_reduce_mean +from utils import skip_weight_decay_fn from utils import get_params_groups from utils import adjust_learning_rate -from config import get_config -from config import update_config +#from mixup import Mixup +#from losses import LabelSmoothingCrossEntropyLoss +#from losses import SoftTargetCrossEntropyLoss +from transformer import build_mae_pretrain as build_model import paddlenlp - def get_arguments(): - """return argumeents, this will overwrite the config after loading yaml file""" - parser = argparse.ArgumentParser('MAE') + """return arguments, these will overwrite the config by (1) yaml file (2) argument values""" + parser = argparse.ArgumentParser('MAE Pretrain') parser.add_argument('-cfg', type=str, default=None) parser.add_argument('-dataset', type=str, default=None) - parser.add_argument('-batch_size', type=int, default=None) - parser.add_argument('-image_size', type=int, default=None) parser.add_argument('-data_path', type=str, default=None) parser.add_argument('-output', type=str, default=None) - parser.add_argument('-ngpus', type=int, default=None) + parser.add_argument('-batch_size', type=int, default=None) + parser.add_argument('-batch_size_eval', type=int, default=None) + parser.add_argument('-image_size', type=int, default=None) parser.add_argument('-accum_iter', type=int, default=None) parser.add_argument('-pretrained', type=str, default=None) parser.add_argument('-resume', type=str, default=None) @@ -59,99 +60,11 @@ def get_arguments(): return arguments -def get_logger(file_path): - """Set logging file and format, logs are written in 2 loggers, one local_logger records - the information on its own gpu/process, one master_logger records the overall/average - information over all gpus/processes.
- Args: - file_path: str, folder path of the logger files to write - Return: - local_logger: python logger for each process - master_logger: python logger for overall processes (on node 0) - """ - local_rank = dist.get_rank() - filename = os.path.join(file_path, f'log_all.txt') - log_format = "%(asctime)s %(message)s" - logging.basicConfig(filename=filename, level=logging.INFO, - format=log_format, datefmt="%m%d %I:%M:%S %p") - - # local_logger for each process/GPU - local_logger = logging.getLogger(f'local_{local_rank}') - filename = os.path.join(file_path, f'log_{local_rank}.txt') - fh = logging.FileHandler(filename) - fh.setFormatter(logging.Formatter(log_format)) - local_logger.addHandler(fh) - ## console - #sh = logging.StreamHandler(sys.stdout) - #sh.setFormatter(logging.Formatter(log_format)) - #local_logger.addHandler(sh) - - # master_logger records avg performance - if local_rank == 0: - master_logger = logging.getLogger('master') - # log.txt - filename = os.path.join(file_path, f'log.txt') - fh = logging.FileHandler(filename) - fh.setFormatter(logging.Formatter(log_format)) - master_logger.addHandler(fh) - # console - sh = logging.StreamHandler(sys.stdout) - sh.setFormatter(logging.Formatter(log_format)) - master_logger.addHandler(sh) - else: - master_logger = None - return local_logger, master_logger - - -def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'): - """Write messages in loggers - Args: - local_logger: python logger, logs information on single gpu - master_logger: python logger, logs information over all gpus - msg_local: str, message to log on local_logger - msg_master: str, message to log on master_logger, if None, use msg_local, default: None - level: str, log level, in ['info', 'warning', 'fatal'], default: 'info' - """ - # write log to local logger - if local_logger: - if level == 'info': - local_logger.info(msg_local) - elif level == 'warning': - local_logger.warning(msg_local) - elif level == 'fatal': - local_logger.fatal(msg_local) - else: - raise ValueError("level must in ['info', 'warning', 'fatal']") - # write log to master logger on node 0 - if master_logger and dist.get_rank() == 0: - if msg_master is None: - msg_master = msg_local - if level == 'info': - master_logger.info("MASTER_LOG " + msg_master) - elif level == 'warning': - master_logger.warning("MASTER_LOG " + msg_master) - elif level == 'fatal': - master_logger.fatal("MASTER_LOG " + msg_master) - else: - raise ValueError("level must in ['info', 'warning', 'fatal']") - - -def all_reduce_mean(x): - """perform all_reduce on Tensor""" - world_size = dist.get_world_size() - if world_size > 1: - x_reduce = paddle.to_tensor(x) - dist.all_reduce(x_reduce) - x_reduce = x_reduce / world_size - return x_reduce.item() - else: - return x - - def train(dataloader, model, mask_ratio, optimizer, + lr_scheduler, base_lr, min_lr, epoch, @@ -200,12 +113,13 @@ def train(dataloader, batch_size = images.shape[0] # adjust learning rate if batch_id % accum_iter == 0: - adjust_learning_rate(optimizer, - base_lr, - min_lr, - batch_id / total_batches + epoch - 1, - warmup_epochs, - total_epochs) + lr_scheduler.step(batch_id / total_batches + epoch -1) + #adjust_learning_rate(optimizer, + # base_lr, + # min_lr, + # batch_id / total_batches + epoch - 1, + # warmup_epochs, + # total_epochs) # forward with paddle.amp.auto_cast(amp_grad_scaler is not None): loss, _, _ = model(images) @@ -216,7 +130,7 @@ def train(dataloader, sys.exit(1) loss = loss / accum_iter - + # backward and step if 
amp_grad_scaler is None: # fp32 loss.backward() @@ -227,7 +141,7 @@ def train(dataloader, scaled_loss = amp_grad_scaler.scale(loss) scaled_loss.backward() if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)): - # amp for param group refer here: https://github.com/PaddlePaddle/Paddle/issues/37188 + # amp for param group reference: https://github.com/PaddlePaddle/Paddle/issues/37188 amp_grad_scaler.step(optimizer) amp_grad_scaler.update() optimizer.clear_grad() @@ -238,25 +152,27 @@ def train(dataloader, master_loss_meter.update(master_loss, master_batch_size) train_loss_meter.update(loss_value, batch_size) if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader): - general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " + general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], " f"Step[{batch_id:04d}/{total_batches:04d}], " - f"Lr: {optimizer.get_lr():04f}, ") + f"Lr: {optimizer.get_lr():.6e}, ") local_message = (general_message + f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f})") master_message = (general_message + f"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f})") write_log(local_logger, master_logger, local_message, master_message) - dist.barrier() + paddle.distributed.barrier() train_time = time.time() - time_st return train_loss_meter.avg, master_loss_meter.avg, train_time def main_worker(*args): + """main method for each process""" # STEP 0: Preparation - #dist.init_parallel_env() - world_size = dist.get_world_size() - local_rank = dist.get_rank() + paddle.device.set_device('gpu') + #paddle.distributed.init_parallel_env() + world_size = paddle.distributed.get_world_size() + local_rank = paddle.distributed.get_rank() config = args[0] last_epoch = config.TRAIN.LAST_EPOCH seed = config.SEED + local_rank @@ -265,13 +181,13 @@ def main_worker(*args): random.seed(seed) # logger for each process/gpu local_logger, master_logger = get_logger(config.SAVE) - message = f'----- world_size = {world_size}, local_rank = {local_rank}' + message = (f'----- world_size = {world_size}, local_rank = {local_rank} \n' + f'----- {config}') write_log(local_logger, master_logger, message) - + # STEP 1: Create model - paddle.device.set_device('gpu') model = build_model(config) - if dist.get_world_size() > 1: + if paddle.distributed.get_world_size() > 1: strategy = fleet.DistributedStrategy() ## Hybrid Parallel Training strategy.hybrid_configs = {} @@ -279,9 +195,9 @@ def main_worker(*args): # STEP 2: Create train dataloader dataset_train = args[1] - dataloader_train = get_dataloader(config, dataset_train, 'train', True) + dataloader_train = get_dataloader(config, dataset_train, True, True) total_batch_train = len(dataloader_train) - message = f'----- Total # of train batch (on single gpu): {total_batch_train}' + message = f'----- Total # of train batch (single gpu): {total_batch_train}' write_log(local_logger, master_logger, message) # STEP 3: Define optimizer and lr_scheduler @@ -293,55 +209,58 @@ def main_worker(*args): ) write_log(local_logger, master_logger, f'Base lr is scaled to: {config.TRAIN.BASE_LR}') # define scaler for amp training - if config.AMP is True: - amp_grad_scaler = paddle.amp.GradScaler() # default init_loss_scaling = 32768 - else: - amp_grad_scaler = None + amp_grad_scaler = paddle.amp.GradScaler() if config.AMP else None # set gradient clip if config.TRAIN.GRAD_CLIP: clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP) else: clip = None # set optimizer + # create warmup and cosine decay lr scheduler + if 
config.TRAIN.WARMUP_EPOCHS > 0: + cosine_lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=config.TRAIN.BASE_LR, + T_max=config.TRAIN.NUM_EPOCHS - config.TRAIN.WARMUP_EPOCHS, + eta_min=config.TRAIN.END_LR, + last_epoch=-1) # do not set last epoch, handled in warmup sched get_lr() + lr_scheduler = paddle.optimizer.lr.LinearWarmup( + learning_rate=cosine_lr_scheduler, # use cosine lr sched after warmup + warmup_steps=config.TRAIN.WARMUP_EPOCHS, # only supports a positive integer + start_lr=config.TRAIN.WARMUP_START_LR, + end_lr=config.TRAIN.BASE_LR, + last_epoch=config.TRAIN.LAST_EPOCH) + else: # create cosine decay lr scheduler if no warmup epochs + lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=config.TRAIN.BASE_LR, + T_max=config.TRAIN.NUM_EPOCHS, + eta_min=config.TRAIN.END_LR, + last_epoch=config.TRAIN.LAST_EPOCH) + if config.TRAIN.OPTIMIZER.NAME == "AdamW": - #wd_exclude_list = ['encoder_position_embedding', 'cls_token'] - wd_exclude_list = [] - for n, p in model.named_parameters(): - if p.stop_gradient is True: - continue - if len(p.shape) == 1 or n.endswith('.bias'): - wd_exclude_list.append(n) - #print('no_decay param names: ', wd_exclude_list) optimizer = paddle.optimizer.AdamW( parameters=model.parameters(), - learning_rate=config.TRAIN.BASE_LR, #scheduler if scheduler is not None else config.TRAIN.BASE_LR, + learning_rate=lr_scheduler, # now only supports warmup + cosine beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], - weight_decay=config.TRAIN.WEIGHT_DECAY, # set by params_groups, this vaule is not effectitve - apply_decay_param_fun=get_exclude_from_weight_decay_fn(wd_exclude_list), + weight_decay=config.TRAIN.WEIGHT_DECAY, epsilon=config.TRAIN.OPTIMIZER.EPS, - grad_clip=clip) - elif config.TRAIN.OPTIMIZER.NAME == "AdamWDL": - name_dict = dict() - wd_exclude_list = ['encoder_position_embedding', 'cls_token'] - for n, p in model.named_parameters(): - # name_dict is for AdamWDL argument 'name_dict' - name_dict[p.name] = n - # add no decay param name to weight exclude list, for AramWDL argument 'apply_decay_param_fn' - if p.stop_gradient is True: - continue - if len(p.shape) == 1 or n.endswith('.bias'): - wd_exclude_list.append(n) - #print('no_decay param names: ', wd_exclude_list) + grad_clip=clip, + apply_decay_param_fun=skip_weight_decay_fn( + model, # skip bn and bias in model + ['encoder_position_embedding', 'cls_token']), # skip custom ops + ) + elif config.TRAIN.OPTIMIZER.NAME == "AdamWDL": # using paddlenlp's impl + name_dict = dict() + for n, p in model.named_parameters(): + name_dict[p.name] = n # map param var name -> param name for AdamWDL 'name_dict' argument optimizer = paddlenlp.ops.optimizer.AdamWDL( - learning_rate=config.TRAIN.BASE_LR, + learning_rate=lr_scheduler, weight_decay=config.TRAIN.WEIGHT_DECAY, layerwise_decay=config.TRAIN.LAYER_DECAY, - n_layers=config.MODEL.TRANS.ENCODER.DEPTH, + n_layers=config.MODEL.ENCODER.DEPTH, set_param_lr_fun=lr_decay.lr_setting, parameters=model.parameters(), name_dict=name_dict, - apply_decay_param_fun=get_exclude_from_weight_decay_fn(wd_exclude_list), + apply_decay_param_fun=skip_weight_decay_fn( + model, # skip bn and bias in model + ['encoder_position_embedding', 'cls_token']), # skip custom ops beta1=config.TRAIN.OPTIMIZER.BETAS[0], beta2=config.TRAIN.OPTIMIZER.BETAS[1], epsilon=config.TRAIN.OPTIMIZER.EPS, @@ -368,35 +287,40 @@ def main_worker(*args): model_state = paddle.load(config.MODEL.RESUME) if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch model.set_state_dict(model_state['model']) - if 'optimizer' in model_state and 'epoch' in model_state: + if
'optimizer' in model_state: optimizer.set_state_dict(model_state['optimizer']) - config.TRAIN.LAST_EPOCH = model_state['epoch'] + 1 + if 'lr_scheduler' in model_state and lr_scheduler is not None: + lr_scheduler.set_state_dict(model_state['lr_scheduler']) + if 'epoch' in model_state: + config.TRAIN.LAST_EPOCH = model_state['epoch'] if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None: amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler']) + if config.TRAIN.MODEL_EMA: + model_ema.module.set_state_dict(model_state['model_ema']) + lr_scheduler.step(config.TRAIN.LAST_EPOCH) message = (f"----- Resume Training: Load model from {config.MODEL.RESUME}, " f"opt = [{'optimizer' in model_state}], " + f"lr_scheduler = [{'lr_scheduler' in model_state}], " + f"model_ema = [{'model_ema' in model_state}], " f"epoch = [{model_state.get('epoch', -1)}], " f"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]") write_log(local_logger, master_logger, message) else: # direct load pdparams without other items - message = f"----- Resume Training: Load model from {config.MODEL.RESUME}, no opt, epoch, or scaler is set!" + message = f"----- Resume Training: Load from {config.MODEL.RESUME}, no opt/epoch/scaler" write_log(local_logger, master_logger, message, 'warning') - model.set_dict(model_state) - - # STEP 5: Start training (train mode) - if dist.get_world_size() > 1: - model = fleet.distributed_model(model) + model.set_state_dict(model_state) write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch + 1}.") for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1): - # train + # Train one epoch write_log(local_logger, master_logger, f"Train epoch {epoch}. LR={optimizer.get_lr():.6e}") train_loss, avg_loss, train_time = train( dataloader=dataloader_train, model=model, - mask_ratio=config.MODEL.TRANS.MASK_RATIO, + mask_ratio=config.MODEL.MASK_RATIO, optimizer=optimizer, + lr_scheduler=lr_scheduler, base_lr=config.TRAIN.BASE_LR, min_lr=config.TRAIN.END_LR, epoch=epoch, @@ -410,7 +334,7 @@ def main_worker(*args): master_logger=master_logger) general_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " - f"Lr: {optimizer.get_lr():.4f}, " + f"Lr: {optimizer.get_lr():.6e}, " f"time: {train_time:.2f}, ") local_message = (general_message + f"Train Loss: {train_loss:.4f}") @@ -429,25 +353,24 @@ def main_worker(*args): state_dict['epoch'] = epoch if amp_grad_scaler is not None: state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict() + if lr_scheduler is not None: + state_dict['lr_scheduler'] = lr_scheduler.state_dict() paddle.save(state_dict, model_path) message = (f"----- Save model: {model_path}") write_log(local_logger, master_logger, message) def main(): - # config is updated by: (1) config.py, (2) yaml file, (3) arguments - arguments = get_arguments() - config = get_config() - config = update_config(config, arguments) + # config is updated in order: (1) default in config.py, (2) yaml file, (3) arguments + config = update_config(get_config(), get_arguments()) # set output folder config.SAVE = '{}/pretrain-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M')) if not os.path.exists(config.SAVE): os.makedirs(config.SAVE, exist_ok=True) # get dataset - dataset_train = get_dataset(config, mode='train') + dataset_train = get_dataset(config, is_train=True) # start training - #config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS - #dist.spawn(main_worker, args=(config, dataset_train, ), nprocs=config.NGPUS) + 
#paddle.distributed.spawn(main_worker, args=(config, dataset_train, )) main_worker(config, dataset_train, ) diff --git a/image_classification/MAE/mixup.py b/self_supervised_learning/MAE/mixup.py similarity index 97% rename from image_classification/MAE/mixup.py rename to self_supervised_learning/MAE/mixup.py index 1d2db493..c365dcdf 100644 --- a/image_classification/MAE/mixup.py +++ b/self_supervised_learning/MAE/mixup.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021 PPViT Authors. All Rights Reserved. +# Copyright (c) 2021 PPViT Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -43,7 +43,7 @@ def rand_bbox(image_shape, lam, count=None): bbox_y2 = np.clip(cy + cut_h // 2, 0, image_h) # NOTE: in paddle, tensor indexing e.g., a[x1:x2], - # if x1 == x2, paddle will raise ValueErros, + # if x1 == x2, paddle will raise ValueErros, # while in pytorch, it will return [] tensor return bbox_x1, bbox_y1, bbox_x2, bbox_y2 @@ -63,8 +63,8 @@ def rand_bbox_minmax(image_shape, minmax, count=None): image_h, image_w = image_shape[-2:] min_ratio = minmax[0] max_ratio = minmax[1] - cut_h = np.random.randint(int(image_h * min_ratio), int(image_h * max_ratio), size=count) - cut_w = np.random.randint(int(image_w * min_ratio), int(image_w * max_ratio), size=count) + cut_h = np.random.randint(int(image_h * min_ratio), int(image_h * max_ratio), size=count) + cut_w = np.random.randint(int(image_w * min_ratio), int(image_w * max_ratio), size=count) bbox_x1 = np.random.randint(0, image_w - cut_w, size=count) bbox_y1 = np.random.randint(0, image_h - cut_h, size=count) @@ -213,7 +213,7 @@ def _mix_batch(self, x): correct_lam=self.correct_lam) # NOTE: in paddle, tensor indexing e.g., a[x1:x2], - # if x1 == x2, paddle will raise ValueErros, + # if x1 == x2, paddle will raise ValueErros, # but in pytorch, it will return [] tensor without errors if int(bbox_x1) != int(bbox_x2) and int(bbox_y1) != int(bbox_y2): x[:, :, int(bbox_x1): int(bbox_x2), int(bbox_y1): int(bbox_y2)] = x.flip(axis=[0])[
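The new pretrain and linear-probe training loops above drop the hand-rolled adjust_learning_rate() in favor of a Paddle scheduler: a CosineAnnealingDecay wrapped in LinearWarmup, handed to the optimizer as its learning_rate and stepped with a fractional epoch once per batch. A minimal, runnable sketch of that wiring, with placeholder hyperparameters and a stand-in linear layer instead of the real config object and MAE model:

    import paddle

    # placeholder hyperparameters standing in for config.TRAIN.* values
    base_lr, warmup_start_lr, end_lr = 1.5e-4, 1e-6, 0.0
    warmup_epochs, total_epochs = 40, 800

    # cosine decay takes over once warmup finishes
    cosine = paddle.optimizer.lr.CosineAnnealingDecay(
        learning_rate=base_lr,
        T_max=total_epochs - warmup_epochs,
        eta_min=end_lr,
        last_epoch=-1)
    lr_scheduler = paddle.optimizer.lr.LinearWarmup(
        learning_rate=cosine,
        warmup_steps=warmup_epochs,   # warmup length in epochs, must be a positive integer
        start_lr=warmup_start_lr,
        end_lr=base_lr,
        last_epoch=0)

    # the scheduler object itself is passed to the optimizer as learning_rate
    model = paddle.nn.Linear(8, 8)    # stand-in for the MAE encoder/decoder
    optimizer = paddle.optimizer.AdamW(
        parameters=model.parameters(),
        learning_rate=lr_scheduler,
        beta1=0.9,
        beta2=0.95,
        weight_decay=0.05)

    # inside the batch loop, train() advances the schedule by a fractional epoch
    epoch, batch_id, total_batches = 1, 20, 312
    lr_scheduler.step(batch_id / total_batches + epoch - 1)
    print(optimizer.get_lr())

Stepping with a float keeps the learning rate changing smoothly within an epoch, which matches the per-iteration behaviour of the adjust_learning_rate() call being replaced.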
0.1:15064,127.0.0.1:41881,127.0.0.1:50174| + | PADDLE_RANK_IN_NODE 0 | + | PADDLE_LOCAL_DEVICE_IDS 0 | + | PADDLE_WORLD_DEVICE_IDS 0,1,2,3,4,5,6,7 | + | FLAGS_selected_gpus 0 | + | FLAGS_selected_accelerators 0 | + +=======================================================================================+ + +INFO 2022-03-22 22:11:35,175 launch_utils.py:562] details about PADDLE_TRAINER_ENDPOINTS can be found in log/endpoints.log, and detail running logs maybe found in log/workerlog.0 +----------- Configuration Arguments ----------- +backend: auto +cluster_topo_path: None +elastic_pre_hook: None +elastic_server: None +enable_auto_mapping: False +force: False +gpus: 0,1,2,3,4,5,6,7 +heter_devices: +heter_worker_num: None +heter_workers: +host: None +http_port: None +ips: 127.0.0.1 +job_id: None +log_dir: log +np: None +nproc_per_node: None +rank_mapping_path: None +run_mode: None +scale: 0 +server_num: None +servers: +training_script: main_multi_gpu_linearprobe.py +training_script_args: ['-cfg=./configs/vit_base_patch16_224_linearprobe_single_node.yaml', '-dataset=imagenet2012', '-batch_size=512', '-data_path=/dataset/imagenet', '-pretrained=./mae_pretrain_vit_base.pdparams', '-amp'] +worker_num: None +workers: +------------------------------------------------ +launch train in GPU mode! +launch proc_id:5826 idx:0 +launch proc_id:5829 idx:1 +launch proc_id:5832 idx:2 +launch proc_id:5835 idx:3 +launch proc_id:5838 idx:4 +launch proc_id:5841 idx:5 +launch proc_id:5844 idx:6 +launch proc_id:5847 idx:7 +/usr/local/lib/python3.7/site-packages/paddlenlp/transformers/funnel/modeling.py:30: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working + from collections import Iterable +Compose( + + + + +) +----- Imagenet2012 train_list.txt len = 1281167 +----- Imagenet2012 val_list.txt len = 50000 +2022-03-22 22:11:41,621 MASTER_LOG ----- world_size = 8, local_rank = 0 +----- AMP: True +BASE: [''] +DATA: + BATCH_SIZE: 512 + BATCH_SIZE_EVAL: 512 + CROP_PCT: 0.875 + DATASET: imagenet2012 + DATA_PATH: /dataset/imagenet + IMAGENET_MEAN: [0.485, 0.456, 0.406] + IMAGENET_STD: [0.229, 0.224, 0.225] + IMAGE_CHANNELS: 3 + IMAGE_SIZE: 224 + NUM_WORKERS: 2 +EVAL: False +MODEL: + ATTENTION_DROPOUT: 0.0 + DECODER: + DEPTH: 8 + EMBED_DIM: 512 + NUM_HEADS: 16 + DROPOUT: 0.0 + DROPPATH: 0.0 + ENCODER: + DEPTH: 12 + EMBED_DIM: 768 + NUM_HEADS: 12 + GLOBAL_POOL: False + MASK_RATIO: 0.75 + MLP_RATIO: 4.0 + NAME: vit_base_patch16_224 + NORM_PIX_LOSS: True + NUM_CLASSES: 1000 + PATCH_SIZE: 16 + PRETRAINED: ./mae_pretrain_vit_base.pdparams + QKV_BIAS: True + RESUME: None + TYPE: LINEARPROBE +REPORT_FREQ: 20 +SAVE: ./output/linearprobe-20220322-22-11 +SAVE_FREQ: 10 +SEED: 0 +TRAIN: + ACCUM_ITER: 4 + AUTO_AUGMENT: False + BASE_LR: 0.1 + COLOR_JITTER: 0.4 + CUTMIX_ALPHA: 1.0 + CUTMIX_MINMAX: None + END_LR: 0.0 + GRAD_CLIP: None + LAST_EPOCH: 0 + LAYER_DECAY: None + LINEAR_SCALED_LR: 256 + MIXUP_ALPHA: 0.8 + MIXUP_MODE: batch + MIXUP_PROB: 1.0 + MIXUP_SWITCH_PROB: 0.5 + NUM_EPOCHS: 90 + OPTIMIZER: + BETAS: (0.9, 0.95) + EPS: 1e-08 + NAME: LARS + RANDOM_ERASE_COUNT: 1 + RANDOM_ERASE_MODE: pixel + RANDOM_ERASE_PROB: 0.25 + RANDOM_ERASE_SPLIT: False + RAND_AUGMENT: True + RAND_AUGMENT_LAYERS: 2 + RAND_AUGMENT_MAGNITUDE: 9 + SMOOTHING: 0.1 + WARMUP_EPOCHS: 10 + WARMUP_START_LR: 0.0 + WEIGHT_DECAY: 0.0 +VALIDATE_FREQ: 1 +W0322 22:11:41.623504 5826 gpu_context.cc:240] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.2, 
Runtime API Version: 10.2 +W0322 22:11:41.630729 5826 gpu_context.cc:268] device: 0, cuDNN Version: 7.6. +encoder_position_embedding [1, 197, 768] True +cls_token [1, 1, 768] True +patch_embedding.patch_embedding.weight [768, 3, 16, 16] True +patch_embedding.patch_embedding.bias [768] True +encoder.layers.0.attn_norm.weight [768] True +encoder.layers.0.attn_norm.bias [768] True +encoder.layers.0.attn.qkv.weight [768, 2304] True +encoder.layers.0.attn.qkv.bias [2304] True +encoder.layers.0.attn.out.weight [768, 768] True +encoder.layers.0.attn.out.bias [768] True +encoder.layers.0.mlp_norm.weight [768] True +encoder.layers.0.mlp_norm.bias [768] True +encoder.layers.0.mlp.fc1.weight [768, 3072] True +encoder.layers.0.mlp.fc1.bias [3072] True +encoder.layers.0.mlp.fc2.weight [3072, 768] True +encoder.layers.0.mlp.fc2.bias [768] True +encoder.layers.1.attn_norm.weight [768] True +encoder.layers.1.attn_norm.bias [768] True +encoder.layers.1.attn.qkv.weight [768, 2304] True +encoder.layers.1.attn.qkv.bias [2304] True +encoder.layers.1.attn.out.weight [768, 768] True +encoder.layers.1.attn.out.bias [768] True +encoder.layers.1.mlp_norm.weight [768] True +encoder.layers.1.mlp_norm.bias [768] True +encoder.layers.1.mlp.fc1.weight [768, 3072] True +encoder.layers.1.mlp.fc1.bias [3072] True +encoder.layers.1.mlp.fc2.weight [3072, 768] True +encoder.layers.1.mlp.fc2.bias [768] True +encoder.layers.2.attn_norm.weight [768] True +encoder.layers.2.attn_norm.bias [768] True +encoder.layers.2.attn.qkv.weight [768, 2304] True +encoder.layers.2.attn.qkv.bias [2304] True +encoder.layers.2.attn.out.weight [768, 768] True +encoder.layers.2.attn.out.bias [768] True +encoder.layers.2.mlp_norm.weight [768] True +encoder.layers.2.mlp_norm.bias [768] True +encoder.layers.2.mlp.fc1.weight [768, 3072] True +encoder.layers.2.mlp.fc1.bias [3072] True +encoder.layers.2.mlp.fc2.weight [3072, 768] True +encoder.layers.2.mlp.fc2.bias [768] True +encoder.layers.3.attn_norm.weight [768] True +encoder.layers.3.attn_norm.bias [768] True +encoder.layers.3.attn.qkv.weight [768, 2304] True +encoder.layers.3.attn.qkv.bias [2304] True +encoder.layers.3.attn.out.weight [768, 768] True +encoder.layers.3.attn.out.bias [768] True +encoder.layers.3.mlp_norm.weight [768] True +encoder.layers.3.mlp_norm.bias [768] True +encoder.layers.3.mlp.fc1.weight [768, 3072] True +encoder.layers.3.mlp.fc1.bias [3072] True +encoder.layers.3.mlp.fc2.weight [3072, 768] True +encoder.layers.3.mlp.fc2.bias [768] True +encoder.layers.4.attn_norm.weight [768] True +encoder.layers.4.attn_norm.bias [768] True +encoder.layers.4.attn.qkv.weight [768, 2304] True +encoder.layers.4.attn.qkv.bias [2304] True +encoder.layers.4.attn.out.weight [768, 768] True +encoder.layers.4.attn.out.bias [768] True +encoder.layers.4.mlp_norm.weight [768] True +encoder.layers.4.mlp_norm.bias [768] True +encoder.layers.4.mlp.fc1.weight [768, 3072] True +encoder.layers.4.mlp.fc1.bias [3072] True +encoder.layers.4.mlp.fc2.weight [3072, 768] True +encoder.layers.4.mlp.fc2.bias [768] True +encoder.layers.5.attn_norm.weight [768] True +encoder.layers.5.attn_norm.bias [768] True +encoder.layers.5.attn.qkv.weight [768, 2304] True +encoder.layers.5.attn.qkv.bias [2304] True +encoder.layers.5.attn.out.weight [768, 768] True +encoder.layers.5.attn.out.bias [768] True +encoder.layers.5.mlp_norm.weight [768] True +encoder.layers.5.mlp_norm.bias [768] True +encoder.layers.5.mlp.fc1.weight [768, 3072] True +encoder.layers.5.mlp.fc1.bias [3072] True +encoder.layers.5.mlp.fc2.weight [3072, 768] True 
+encoder.layers.5.mlp.fc2.bias [768] True +encoder.layers.6.attn_norm.weight [768] True +encoder.layers.6.attn_norm.bias [768] True +encoder.layers.6.attn.qkv.weight [768, 2304] True +encoder.layers.6.attn.qkv.bias [2304] True +encoder.layers.6.attn.out.weight [768, 768] True +encoder.layers.6.attn.out.bias [768] True +encoder.layers.6.mlp_norm.weight [768] True +encoder.layers.6.mlp_norm.bias [768] True +encoder.layers.6.mlp.fc1.weight [768, 3072] True +encoder.layers.6.mlp.fc1.bias [3072] True +encoder.layers.6.mlp.fc2.weight [3072, 768] True +encoder.layers.6.mlp.fc2.bias [768] True +encoder.layers.7.attn_norm.weight [768] True +encoder.layers.7.attn_norm.bias [768] True +encoder.layers.7.attn.qkv.weight [768, 2304] True +encoder.layers.7.attn.qkv.bias [2304] True +encoder.layers.7.attn.out.weight [768, 768] True +encoder.layers.7.attn.out.bias [768] True +encoder.layers.7.mlp_norm.weight [768] True +encoder.layers.7.mlp_norm.bias [768] True +encoder.layers.7.mlp.fc1.weight [768, 3072] True +encoder.layers.7.mlp.fc1.bias [3072] True +encoder.layers.7.mlp.fc2.weight [3072, 768] True +encoder.layers.7.mlp.fc2.bias [768] True +encoder.layers.8.attn_norm.weight [768] True +encoder.layers.8.attn_norm.bias [768] True +encoder.layers.8.attn.qkv.weight [768, 2304] True +encoder.layers.8.attn.qkv.bias [2304] True +encoder.layers.8.attn.out.weight [768, 768] True +encoder.layers.8.attn.out.bias [768] True +INFO 2022-03-22 22:12:02,351 launch_utils.py:321] terminate process group gid:5838 +INFO 2022-03-22 22:12:02,351 launch_utils.py:321] terminate process group gid:5841 +INFO 2022-03-22 22:12:02,351 launch_utils.py:321] terminate process group gid:5844 +INFO 2022-03-22 22:12:02,352 launch_utils.py:321] terminate process group gid:5847 +INFO 2022-03-22 22:12:06,355 launch_utils.py:342] terminate all the procs +ERROR 2022-03-22 22:12:06,355 launch_utils.py:638] ABORT!!! Out of all 8 trainers, the trainer process with rank=[0, 1, 2, 3] was aborted. Please check its log. +INFO 2022-03-22 22:12:10,359 launch_utils.py:342] terminate all the procs +INFO 2022-03-22 22:12:10,359 launch.py:391] Local processes completed. 
+encoder.layers.8.mlp_norm.weight [768] True +encoder.layers.8.mlp_norm.bias [768] True +encoder.layers.8.mlp.fc1.weight [768, 3072] True +encoder.layers.8.mlp.fc1.bias [3072] True +encoder.layers.8.mlp.fc2.weight [3072, 768] True +encoder.layers.8.mlp.fc2.bias [768] True +encoder.layers.9.attn_norm.weight [768] True +encoder.layers.9.attn_norm.bias [768] True +encoder.layers.9.attn.qkv.weight [768, 2304] True +encoder.layers.9.attn.qkv.bias [2304] True +encoder.layers.9.attn.out.weight [768, 768] True +encoder.layers.9.attn.out.bias [768] True +encoder.layers.9.mlp_norm.weight [768] True +encoder.layers.9.mlp_norm.bias [768] True +encoder.layers.9.mlp.fc1.weight [768, 3072] True +encoder.layers.9.mlp.fc1.bias [3072] True +encoder.layers.9.mlp.fc2.weight [3072, 768] True +encoder.layers.9.mlp.fc2.bias [768] True +encoder.layers.10.attn_norm.weight [768] True +encoder.layers.10.attn_norm.bias [768] True +encoder.layers.10.attn.qkv.weight [768, 2304] True +encoder.layers.10.attn.qkv.bias [2304] True +encoder.layers.10.attn.out.weight [768, 768] True +encoder.layers.10.attn.out.bias [768] True +encoder.layers.10.mlp_norm.weight [768] True +encoder.layers.10.mlp_norm.bias [768] True +encoder.layers.10.mlp.fc1.weight [768, 3072] True +encoder.layers.10.mlp.fc1.bias [3072] True +encoder.layers.10.mlp.fc2.weight [3072, 768] True +encoder.layers.10.mlp.fc2.bias [768] True +encoder.layers.11.attn_norm.weight [768] True +encoder.layers.11.attn_norm.bias [768] True +encoder.layers.11.attn.qkv.weight [768, 2304] True +encoder.layers.11.attn.qkv.bias [2304] True +encoder.layers.11.attn.out.weight [768, 768] True +encoder.layers.11.attn.out.bias [768] True +encoder.layers.11.mlp_norm.weight [768] True +encoder.layers.11.mlp_norm.bias [768] True +encoder.layers.11.mlp.fc1.weight [768, 3072] True +encoder.layers.11.mlp.fc1.bias [3072] True +encoder.layers.11.mlp.fc2.weight [3072, 768] True +encoder.layers.11.mlp.fc2.bias [768] True +encoder.norm.weight [768] True +encoder.norm.bias [768] True +classifier.0.weight [768] False +classifier.0.bias [768] False +classifier.0._mean [768] False +classifier.0._variance [768] False +classifier.1.weight [768, 1000] False +classifier.1.bias [1000] False +server not ready, wait 3 sec to retry... 
+not ready endpoints:['127.0.0.1:17571', '127.0.0.1:20006', '127.0.0.1:55374', '127.0.0.1:25328', '127.0.0.1:15064', '127.0.0.1:41881', '127.0.0.1:50174'] +I0322 22:11:54.265852 5826 nccl_context.cc:82] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 0 +I0322 22:11:56.234949 5826 nccl_context.cc:114] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 10 +2022-03-22 22:11:57,826-INFO: [topology.py:169:__init__] HybridParallelInfo: rank_id: 0, mp_degree: 1, sharding_degree: 1, pp_degree: 1, dp_degree: 8, mp_group: [0], sharding_group: [0], pp_group: [0], dp_group: [0, 1, 2, 3, 4, 5, 6, 7], check/clip group: [0] +2022-03-22 22:11:57,828 MASTER_LOG ----- Total # of train batch (single gpu): 312 +2022-03-22 22:11:57,828 MASTER_LOG ----- Total # of val batch (single gpu): 13 +2022-03-22 22:11:57,829 MASTER_LOG Base lr is scaled to: 6.4 +Traceback (most recent call last): + File "main_multi_gpu_linearprobe.py", line 622, in + main() + File "main_multi_gpu_linearprobe.py", line 618, in main + main_worker(config, dataset_train, dataset_val) + File "main_multi_gpu_linearprobe.py", line 438, in main_worker + assert os.path.isfile(config.MODEL.PRETRAINED) is True +AssertionError +WARNING 2022-03-23 10:07:48,191 launch.py:503] Not found distinct arguments and compiled with cuda or xpu or npu. Default use collective mode +INFO 2022-03-23 10:07:48,193 launch_utils.py:557] Local start 8 processes. First process distributed environment info (Only For Debug): + +=======================================================================================+ + | Distributed Envs Value | + +---------------------------------------------------------------------------------------+ + | PADDLE_TRAINER_ID 0 | + | PADDLE_CURRENT_ENDPOINT 127.0.0.1:16322 | + | PADDLE_TRAINERS_NUM 8 | + | PADDLE_TRAINER_ENDPOINTS ... 0.1:33084,127.0.0.1:60249,127.0.0.1:48028| + | PADDLE_RANK_IN_NODE 0 | + | PADDLE_LOCAL_DEVICE_IDS 0 | + | PADDLE_WORLD_DEVICE_IDS 0,1,2,3,4,5,6,7 | + | FLAGS_selected_gpus 0 | + | FLAGS_selected_accelerators 0 | + +=======================================================================================+ + +INFO 2022-03-23 10:07:48,193 launch_utils.py:562] details about PADDLE_TRAINER_ENDPOINTS can be found in log/endpoints.log, and detail running logs maybe found in log/workerlog.0 +----------- Configuration Arguments ----------- +backend: auto +cluster_topo_path: None +elastic_pre_hook: None +elastic_server: None +enable_auto_mapping: False +force: False +gpus: 0,1,2,3,4,5,6,7 +heter_devices: +heter_worker_num: None +heter_workers: +host: None +http_port: None +ips: 127.0.0.1 +job_id: None +log_dir: log +np: None +nproc_per_node: None +rank_mapping_path: None +run_mode: None +scale: 0 +server_num: None +servers: +training_script: main_multi_gpu_linearprobe.py +training_script_args: ['-cfg=./configs/vit_base_patch16_224_linearprobe_single_node.yaml', '-dataset=imagenet2012', '-batch_size=512', '-data_path=/dataset/imagenet', '-pretrained=./mae_pretrain_vit_base.pdparams', '-amp'] +worker_num: None +workers: +------------------------------------------------ +launch train in GPU mode! 
+launch proc_id:6102 idx:0 +launch proc_id:6105 idx:1 +launch proc_id:6108 idx:2 +launch proc_id:6111 idx:3 +launch proc_id:6114 idx:4 +launch proc_id:6117 idx:5 +launch proc_id:6120 idx:6 +launch proc_id:6123 idx:7 +/usr/local/lib/python3.7/site-packages/paddlenlp/transformers/funnel/modeling.py:30: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working + from collections import Iterable +Compose( + + + + +) +----- Imagenet2012 train_list.txt len = 1281167 +----- Imagenet2012 val_list.txt len = 50000 +2022-03-23 10:07:54,812 MASTER_LOG ----- world_size = 8, local_rank = 0 +----- AMP: True +BASE: [''] +DATA: + BATCH_SIZE: 512 + BATCH_SIZE_EVAL: 512 + CROP_PCT: 0.875 + DATASET: imagenet2012 + DATA_PATH: /dataset/imagenet + IMAGENET_MEAN: [0.485, 0.456, 0.406] + IMAGENET_STD: [0.229, 0.224, 0.225] + IMAGE_CHANNELS: 3 + IMAGE_SIZE: 224 + NUM_WORKERS: 2 +EVAL: False +MODEL: + ATTENTION_DROPOUT: 0.0 + DECODER: + DEPTH: 8 + EMBED_DIM: 512 + NUM_HEADS: 16 + DROPOUT: 0.0 + DROPPATH: 0.0 + ENCODER: + DEPTH: 12 + EMBED_DIM: 768 + NUM_HEADS: 12 + GLOBAL_POOL: False + MASK_RATIO: 0.75 + MLP_RATIO: 4.0 + NAME: vit_base_patch16_224 + NORM_PIX_LOSS: True + NUM_CLASSES: 1000 + PATCH_SIZE: 16 + PRETRAINED: ./mae_pretrain_vit_base.pdparams + QKV_BIAS: True + RESUME: None + TYPE: LINEARPROBE +REPORT_FREQ: 20 +SAVE: ./output/linearprobe-20220323-10-07 +SAVE_FREQ: 10 +SEED: 0 +TRAIN: + ACCUM_ITER: 4 + AUTO_AUGMENT: False + BASE_LR: 0.1 + COLOR_JITTER: 0.4 + CUTMIX_ALPHA: 1.0 + CUTMIX_MINMAX: None + END_LR: 0.0 + GRAD_CLIP: None + LAST_EPOCH: 0 + LAYER_DECAY: None + LINEAR_SCALED_LR: 256 + MIXUP_ALPHA: 0.8 + MIXUP_MODE: batch + MIXUP_PROB: 1.0 + MIXUP_SWITCH_PROB: 0.5 + NUM_EPOCHS: 90 + OPTIMIZER: + BETAS: (0.9, 0.95) + EPS: 1e-08 + NAME: LARS + RANDOM_ERASE_COUNT: 1 + RANDOM_ERASE_MODE: pixel + RANDOM_ERASE_PROB: 0.25 + RANDOM_ERASE_SPLIT: False + RAND_AUGMENT: True + RAND_AUGMENT_LAYERS: 2 + RAND_AUGMENT_MAGNITUDE: 9 + SMOOTHING: 0.1 + WARMUP_EPOCHS: 10 + WARMUP_START_LR: 0.0 + WEIGHT_DECAY: 0.0 +VALIDATE_FREQ: 1 +W0323 10:07:54.817029 6102 gpu_context.cc:240] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.2, Runtime API Version: 10.2 +W0323 10:07:54.823293 6102 gpu_context.cc:268] device: 0, cuDNN Version: 7.6. 
+encoder_position_embedding [1, 197, 768] True +cls_token [1, 1, 768] True +patch_embedding.patch_embedding.weight [768, 3, 16, 16] True +patch_embedding.patch_embedding.bias [768] True +encoder.layers.0.attn_norm.weight [768] True +encoder.layers.0.attn_norm.bias [768] True +encoder.layers.0.attn.qkv.weight [768, 2304] True +encoder.layers.0.attn.qkv.bias [2304] True +encoder.layers.0.attn.out.weight [768, 768] True +encoder.layers.0.attn.out.bias [768] True +encoder.layers.0.mlp_norm.weight [768] True +encoder.layers.0.mlp_norm.bias [768] True +encoder.layers.0.mlp.fc1.weight [768, 3072] True +encoder.layers.0.mlp.fc1.bias [3072] True +encoder.layers.0.mlp.fc2.weight [3072, 768] True +encoder.layers.0.mlp.fc2.bias [768] True +encoder.layers.1.attn_norm.weight [768] True +encoder.layers.1.attn_norm.bias [768] True +encoder.layers.1.attn.qkv.weight [768, 2304] True +encoder.layers.1.attn.qkv.bias [2304] True +encoder.layers.1.attn.out.weight [768, 768] True +encoder.layers.1.attn.out.bias [768] True +encoder.layers.1.mlp_norm.weight [768] True +encoder.layers.1.mlp_norm.bias [768] True +encoder.layers.1.mlp.fc1.weight [768, 3072] True +encoder.layers.1.mlp.fc1.bias [3072] True +encoder.layers.1.mlp.fc2.weight [3072, 768] True +encoder.layers.1.mlp.fc2.bias [768] True +encoder.layers.2.attn_norm.weight [768] True +encoder.layers.2.attn_norm.bias [768] True +encoder.layers.2.attn.qkv.weight [768, 2304] True +encoder.layers.2.attn.qkv.bias [2304] True +encoder.layers.2.attn.out.weight [768, 768] True +encoder.layers.2.attn.out.bias [768] True +encoder.layers.2.mlp_norm.weight [768] True +encoder.layers.2.mlp_norm.bias [768] True +encoder.layers.2.mlp.fc1.weight [768, 3072] True +encoder.layers.2.mlp.fc1.bias [3072] True +encoder.layers.2.mlp.fc2.weight [3072, 768] True +encoder.layers.2.mlp.fc2.bias [768] True +encoder.layers.3.attn_norm.weight [768] True +encoder.layers.3.attn_norm.bias [768] True +encoder.layers.3.attn.qkv.weight [768, 2304] True +encoder.layers.3.attn.qkv.bias [2304] True +encoder.layers.3.attn.out.weight [768, 768] True +encoder.layers.3.attn.out.bias [768] True +encoder.layers.3.mlp_norm.weight [768] True +encoder.layers.3.mlp_norm.bias [768] True +encoder.layers.3.mlp.fc1.weight [768, 3072] True +encoder.layers.3.mlp.fc1.bias [3072] True +encoder.layers.3.mlp.fc2.weight [3072, 768] True +encoder.layers.3.mlp.fc2.bias [768] True +encoder.layers.4.attn_norm.weight [768] True +encoder.layers.4.attn_norm.bias [768] True +encoder.layers.4.attn.qkv.weight [768, 2304] True +encoder.layers.4.attn.qkv.bias [2304] True +encoder.layers.4.attn.out.weight [768, 768] True +encoder.layers.4.attn.out.bias [768] True +encoder.layers.4.mlp_norm.weight [768] True +encoder.layers.4.mlp_norm.bias [768] True +encoder.layers.4.mlp.fc1.weight [768, 3072] True +encoder.layers.4.mlp.fc1.bias [3072] True +encoder.layers.4.mlp.fc2.weight [3072, 768] True +encoder.layers.4.mlp.fc2.bias [768] True +encoder.layers.5.attn_norm.weight [768] True +encoder.layers.5.attn_norm.bias [768] True +encoder.layers.5.attn.qkv.weight [768, 2304] True +encoder.layers.5.attn.qkv.bias [2304] True +encoder.layers.5.attn.out.weight [768, 768] True +encoder.layers.5.attn.out.bias [768] True +encoder.layers.5.mlp_norm.weight [768] True +encoder.layers.5.mlp_norm.bias [768] True +encoder.layers.5.mlp.fc1.weight [768, 3072] True +encoder.layers.5.mlp.fc1.bias [3072] True +encoder.layers.5.mlp.fc2.weight [3072, 768] True +encoder.layers.5.mlp.fc2.bias [768] True +encoder.layers.6.attn_norm.weight [768] True 
+encoder.layers.6.attn_norm.bias [768] True +encoder.layers.6.attn.qkv.weight [768, 2304] True +encoder.layers.6.attn.qkv.bias [2304] True +encoder.layers.6.attn.out.weight [768, 768] True +encoder.layers.6.attn.out.bias [768] True +encoder.layers.6.mlp_norm.weight [768] True +encoder.layers.6.mlp_norm.bias [768] True +encoder.layers.6.mlp.fc1.weight [768, 3072] True +encoder.layers.6.mlp.fc1.bias [3072] True +encoder.layers.6.mlp.fc2.weight [3072, 768] True +encoder.layers.6.mlp.fc2.bias [768] True +encoder.layers.7.attn_norm.weight [768] True +encoder.layers.7.attn_norm.bias [768] True +encoder.layers.7.attn.qkv.weight [768, 2304] True +encoder.layers.7.attn.qkv.bias [2304] True +encoder.layers.7.attn.out.weight [768, 768] True +encoder.layers.7.attn.out.bias [768] True +encoder.layers.7.mlp_norm.weight [768] True +encoder.layers.7.mlp_norm.bias [768] True +encoder.layers.7.mlp.fc1.weight [768, 3072] True +encoder.layers.7.mlp.fc1.bias [3072] True +encoder.layers.7.mlp.fc2.weight [3072, 768] True +encoder.layers.7.mlp.fc2.bias [768] True +encoder.layers.8.attn_norm.weight [768] True +encoder.layers.8.attn_norm.bias [768] True +encoder.layers.8.attn.qkv.weight [768, 2304] True +encoder.layers.8.attn.qkv.bias [2304] True +encoder.layers.8.attn.out.weight [768, 768] True +encoder.layers.8.attn.out.bias [768] True +INFO 2022-03-23 10:21:40,155 launch_utils.py:321] terminate process group gid:6102 +INFO 2022-03-23 10:21:40,155 launch_utils.py:321] terminate process group gid:6117 +INFO 2022-03-23 10:21:44,159 launch_utils.py:342] terminate all the procs +ERROR 2022-03-23 10:21:44,159 launch_utils.py:638] ABORT!!! Out of all 8 trainers, the trainer process with rank=[1, 2, 3, 4, 6, 7] was aborted. Please check its log. +INFO 2022-03-23 10:21:48,163 launch_utils.py:342] terminate all the procs +INFO 2022-03-23 10:21:48,163 launch.py:391] Local processes completed. 
+encoder.layers.8.mlp_norm.weight [768] True +encoder.layers.8.mlp_norm.bias [768] True +encoder.layers.8.mlp.fc1.weight [768, 3072] True +encoder.layers.8.mlp.fc1.bias [3072] True +encoder.layers.8.mlp.fc2.weight [3072, 768] True +encoder.layers.8.mlp.fc2.bias [768] True +encoder.layers.9.attn_norm.weight [768] True +encoder.layers.9.attn_norm.bias [768] True +encoder.layers.9.attn.qkv.weight [768, 2304] True +encoder.layers.9.attn.qkv.bias [2304] True +encoder.layers.9.attn.out.weight [768, 768] True +encoder.layers.9.attn.out.bias [768] True +encoder.layers.9.mlp_norm.weight [768] True +encoder.layers.9.mlp_norm.bias [768] True +encoder.layers.9.mlp.fc1.weight [768, 3072] True +encoder.layers.9.mlp.fc1.bias [3072] True +encoder.layers.9.mlp.fc2.weight [3072, 768] True +encoder.layers.9.mlp.fc2.bias [768] True +encoder.layers.10.attn_norm.weight [768] True +encoder.layers.10.attn_norm.bias [768] True +encoder.layers.10.attn.qkv.weight [768, 2304] True +encoder.layers.10.attn.qkv.bias [2304] True +encoder.layers.10.attn.out.weight [768, 768] True +encoder.layers.10.attn.out.bias [768] True +encoder.layers.10.mlp_norm.weight [768] True +encoder.layers.10.mlp_norm.bias [768] True +encoder.layers.10.mlp.fc1.weight [768, 3072] True +encoder.layers.10.mlp.fc1.bias [3072] True +encoder.layers.10.mlp.fc2.weight [3072, 768] True +encoder.layers.10.mlp.fc2.bias [768] True +encoder.layers.11.attn_norm.weight [768] True +encoder.layers.11.attn_norm.bias [768] True +encoder.layers.11.attn.qkv.weight [768, 2304] True +encoder.layers.11.attn.qkv.bias [2304] True +encoder.layers.11.attn.out.weight [768, 768] True +encoder.layers.11.attn.out.bias [768] True +encoder.layers.11.mlp_norm.weight [768] True +encoder.layers.11.mlp_norm.bias [768] True +encoder.layers.11.mlp.fc1.weight [768, 3072] True +encoder.layers.11.mlp.fc1.bias [3072] True +encoder.layers.11.mlp.fc2.weight [3072, 768] True +encoder.layers.11.mlp.fc2.bias [768] True +encoder.norm.weight [768] True +encoder.norm.bias [768] True +classifier.0.weight [768] False +classifier.0.bias [768] False +classifier.0._mean [768] False +classifier.0._variance [768] False +classifier.1.weight [768, 1000] False +classifier.1.bias [1000] False +server not ready, wait 3 sec to retry... +not ready endpoints:['127.0.0.1:24455', '127.0.0.1:49357', '127.0.0.1:29615', '127.0.0.1:17426', '127.0.0.1:33084', '127.0.0.1:60249', '127.0.0.1:48028'] +I0323 10:08:07.629041 6102 nccl_context.cc:82] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 0 +I0323 10:08:09.865425 6102 nccl_context.cc:114] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 10 +2022-03-23 10:08:11,574-INFO: [topology.py:169:__init__] HybridParallelInfo: rank_id: 0, mp_degree: 1, sharding_degree: 1, pp_degree: 1, dp_degree: 8, mp_group: [0], sharding_group: [0], pp_group: [0], dp_group: [0, 1, 2, 3, 4, 5, 6, 7], check/clip group: [0] +2022-03-23 10:08:11,575 MASTER_LOG ----- Total # of train batch (single gpu): 312 +2022-03-23 10:08:11,576 MASTER_LOG ----- Total # of val batch (single gpu): 13 +2022-03-23 10:08:11,576 MASTER_LOG Base lr is scaled to: 6.4 +2022-03-23 10:08:12,843 MASTER_LOG ----- Pretrained: Load model state from ./mae_pretrain_vit_base.pdparams +2022-03-23 10:08:12,880 MASTER_LOG ----- Start training from epoch 1. +2022-03-23 10:08:12,880 MASTER_LOG Train epoch 1. 
LR=6.400000e-01 +2022-03-23 10:08:20,797 MASTER_LOG Epoch[001/090], Step[0000/0312], Lr: 0.000000e+00, Loss: 6.9421 (6.9421), Avg Acc: 0.0010 +2022-03-23 10:09:09,470 MASTER_LOG Epoch[001/090], Step[0020/0312], Lr: 4.102564e-02, Loss: 6.8971 (6.9324), Avg Acc: 0.0010 +2022-03-23 10:09:58,105 MASTER_LOG Epoch[001/090], Step[0040/0312], Lr: 8.205128e-02, Loss: 6.6101 (6.8617), Avg Acc: 0.0052 +2022-03-23 10:10:45,402 MASTER_LOG Epoch[001/090], Step[0060/0312], Lr: 1.230769e-01, Loss: 6.0317 (6.7012), Avg Acc: 0.0279 +2022-03-23 10:11:32,415 MASTER_LOG Epoch[001/090], Step[0080/0312], Lr: 1.641026e-01, Loss: 5.3711 (6.4640), Avg Acc: 0.0567 +2022-03-23 10:12:20,143 MASTER_LOG Epoch[001/090], Step[0100/0312], Lr: 2.051282e-01, Loss: 4.7358 (6.1868), Avg Acc: 0.0836 +2022-03-23 10:13:08,619 MASTER_LOG Epoch[001/090], Step[0120/0312], Lr: 2.461538e-01, Loss: 4.1960 (5.9053), Avg Acc: 0.1104 +2022-03-23 10:13:55,861 MASTER_LOG Epoch[001/090], Step[0140/0312], Lr: 2.871795e-01, Loss: 3.8316 (5.6387), Avg Acc: 0.1366 +2022-03-23 10:14:44,284 MASTER_LOG Epoch[001/090], Step[0160/0312], Lr: 3.282051e-01, Loss: 3.5256 (5.3938), Avg Acc: 0.1613 +2022-03-23 10:15:32,316 MASTER_LOG Epoch[001/090], Step[0180/0312], Lr: 3.692308e-01, Loss: 3.2858 (5.1746), Avg Acc: 0.1840 +2022-03-23 10:16:21,607 MASTER_LOG Epoch[001/090], Step[0200/0312], Lr: 4.102564e-01, Loss: 3.0855 (4.9790), Avg Acc: 0.2046 +2022-03-23 10:17:09,678 MASTER_LOG Epoch[001/090], Step[0220/0312], Lr: 4.512821e-01, Loss: 2.9397 (4.8059), Avg Acc: 0.2231 +2022-03-23 10:17:57,601 MASTER_LOG Epoch[001/090], Step[0240/0312], Lr: 4.923077e-01, Loss: 2.8253 (4.6502), Avg Acc: 0.2400 +2022-03-23 10:18:45,861 MASTER_LOG Epoch[001/090], Step[0260/0312], Lr: 5.333333e-01, Loss: 2.7706 (4.5104), Avg Acc: 0.2556 +2022-03-23 10:19:33,782 MASTER_LOG Epoch[001/090], Step[0280/0312], Lr: 5.743590e-01, Loss: 2.7140 (4.3843), Avg Acc: 0.2697 +2022-03-23 10:20:22,668 MASTER_LOG Epoch[001/090], Step[0300/0312], Lr: 6.153846e-01, Loss: 2.6050 (4.2708), Avg Acc: 0.2826 +2022-03-23 10:20:47,517 MASTER_LOG Epoch[001/090], Step[0311/0312], Lr: 6.317949e-01, Loss: 2.6399 (4.2128), Avg Acc: 0.2891 +2022-03-23 10:20:49,805 MASTER_LOG ----- Epoch[001/090], Lr: 6.317949e-01, time: 756.92Train Loss: 4.2128, Train Acc: 0.2891 +2022-03-23 10:20:49,805 MASTER_LOG ----- Validation after Epoch: 1 +2022-03-23 10:20:59,660 MASTER_LOG Step[0000/0013], Avg Loss: 1.7821, Avg Acc@1: 0.5928, Avg Acc@5: 0.8276 +Traceback (most recent call last): + File "main_multi_gpu_linearprobe.py", line 622, in + main() + File "main_multi_gpu_linearprobe.py", line 618, in main + main_worker(config, dataset_train, dataset_val) + File "main_multi_gpu_linearprobe.py", line 569, in main_worker + master_logger=master_logger) + File "", line 2, in validate + File "/usr/local/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py", line 351, in _decorate_function + return func(*args, **kwargs) + File "main_multi_gpu_linearprobe.py", line 272, in validate + paddle.distrtibuted.barrier() +AttributeError: module 'paddle' has no attribute 'distrtibuted' +WARNING 2022-03-23 16:59:15,049 launch.py:503] Not found distinct arguments and compiled with cuda or xpu or npu. Default use collective mode +INFO 2022-03-23 16:59:15,051 launch_utils.py:557] Local start 8 processes. 
First process distributed environment info (Only For Debug): + +=======================================================================================+ + | Distributed Envs Value | + +---------------------------------------------------------------------------------------+ + | PADDLE_TRAINER_ID 0 | + | PADDLE_CURRENT_ENDPOINT 127.0.0.1:43622 | + | PADDLE_TRAINERS_NUM 8 | + | PADDLE_TRAINER_ENDPOINTS ... 0.1:10870,127.0.0.1:15574,127.0.0.1:32888| + | PADDLE_RANK_IN_NODE 0 | + | PADDLE_LOCAL_DEVICE_IDS 0 | + | PADDLE_WORLD_DEVICE_IDS 0,1,2,3,4,5,6,7 | + | FLAGS_selected_gpus 0 | + | FLAGS_selected_accelerators 0 | + +=======================================================================================+ + +INFO 2022-03-23 16:59:15,052 launch_utils.py:562] details about PADDLE_TRAINER_ENDPOINTS can be found in log/endpoints.log, and detail running logs maybe found in log/workerlog.0 +INFO 2022-03-23 16:59:22,188 launch_utils.py:342] terminate all the procs +ERROR 2022-03-23 16:59:22,188 launch_utils.py:638] ABORT!!! Out of all 8 trainers, the trainer process with rank=[0, 1, 2, 3, 4, 5, 6, 7] was aborted. Please check its log. +INFO 2022-03-23 16:59:26,192 launch_utils.py:342] terminate all the procs +INFO 2022-03-23 16:59:26,192 launch.py:391] Local processes completed. +----------- Configuration Arguments ----------- +backend: auto +cluster_topo_path: None +elastic_pre_hook: None +elastic_server: None +enable_auto_mapping: False +force: False +gpus: 0,1,2,3,4,5,6,7 +heter_devices: +heter_worker_num: None +heter_workers: +host: None +http_port: None +ips: 127.0.0.1 +job_id: None +log_dir: log +np: None +nproc_per_node: None +rank_mapping_path: None +run_mode: None +scale: 0 +server_num: None +servers: +training_script: main_multi_gpu_linearprobe.py +training_script_args: ['-cfg=./configs/vit_base_patch16_224_linearprobe_single_node.yaml', '-dataset=imagenet2012', '-batch_size=512', '-data_path=/dataset/imagenet', '-pretrained=./mae_pretrain_vit_base.pdparams', '-amp'] +worker_num: None +workers: +------------------------------------------------ +launch train in GPU mode! +launch proc_id:6736 idx:0 +launch proc_id:6739 idx:1 +launch proc_id:6742 idx:2 +launch proc_id:6745 idx:3 +launch proc_id:6748 idx:4 +launch proc_id:6751 idx:5 +launch proc_id:6754 idx:6 +launch proc_id:6757 idx:7 +Traceback (most recent call last): + File "main_multi_gpu_linearprobe.py", line 42, in + from transformer import build_transformer as build_model + File "/workspace/ppvit_github/PaddleViT_Train/PaddleViT/image_classification/paddlecloud/MAE_gitlab/MAE_paddle/transformer.py", line 748 + bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0)) + ^ +SyntaxError: invalid syntax +WARNING 2022-03-23 17:10:58,917 launch.py:503] Not found distinct arguments and compiled with cuda or xpu or npu. Default use collective mode +INFO 2022-03-23 17:10:58,919 launch_utils.py:557] Local start 8 processes. First process distributed environment info (Only For Debug): + +=======================================================================================+ + | Distributed Envs Value | + +---------------------------------------------------------------------------------------+ + | PADDLE_TRAINER_ID 0 | + | PADDLE_CURRENT_ENDPOINT 127.0.0.1:25091 | + | PADDLE_TRAINERS_NUM 8 | + | PADDLE_TRAINER_ENDPOINTS ... 
0.1:56764,127.0.0.1:55133,127.0.0.1:16222| + | PADDLE_RANK_IN_NODE 0 | + | PADDLE_LOCAL_DEVICE_IDS 0 | + | PADDLE_WORLD_DEVICE_IDS 0,1,2,3,4,5,6,7 | + | FLAGS_selected_gpus 0 | + | FLAGS_selected_accelerators 0 | + +=======================================================================================+ + +INFO 2022-03-23 17:10:58,919 launch_utils.py:562] details about PADDLE_TRAINER_ENDPOINTS can be found in log/endpoints.log, and detail running logs maybe found in log/workerlog.0 +----------- Configuration Arguments ----------- +backend: auto +cluster_topo_path: None +elastic_pre_hook: None +elastic_server: None +enable_auto_mapping: False +force: False +gpus: 0,1,2,3,4,5,6,7 +heter_devices: +heter_worker_num: None +heter_workers: +host: None +http_port: None +ips: 127.0.0.1 +job_id: None +log_dir: log +np: None +nproc_per_node: None +rank_mapping_path: None +run_mode: None +scale: 0 +server_num: None +servers: +training_script: main_multi_gpu_linearprobe.py +training_script_args: ['-cfg=./configs/vit_base_patch16_224_linearprobe_single_node.yaml', '-dataset=imagenet2012', '-batch_size=512', '-data_path=/dataset/imagenet', '-pretrained=./mae_pretrain_vit_base.pdparams', '-amp'] +worker_num: None +workers: +------------------------------------------------ +launch train in GPU mode! +launch proc_id:6911 idx:0 +launch proc_id:6914 idx:1 +launch proc_id:6917 idx:2 +launch proc_id:6920 idx:3 +launch proc_id:6923 idx:4 +launch proc_id:6926 idx:5 +launch proc_id:6929 idx:6 +launch proc_id:6932 idx:7 +/usr/local/lib/python3.7/site-packages/paddlenlp/transformers/funnel/modeling.py:30: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working + from collections import Iterable +Compose( + + + + +) +----- Imagenet2012 train_list.txt len = 1281167 +----- Imagenet2012 val_list.txt len = 50000 +2022-03-23 17:11:06,114 MASTER_LOG ----- world_size = 8, local_rank = 0 +----- AMP: True +BASE: [''] +DATA: + BATCH_SIZE: 512 + BATCH_SIZE_EVAL: 512 + CROP_PCT: 0.875 + DATASET: imagenet2012 + DATA_PATH: /dataset/imagenet + IMAGENET_MEAN: [0.485, 0.456, 0.406] + IMAGENET_STD: [0.229, 0.224, 0.225] + IMAGE_CHANNELS: 3 + IMAGE_SIZE: 224 + NUM_WORKERS: 2 +EVAL: False +MODEL: + ATTENTION_DROPOUT: 0.0 + DECODER: + DEPTH: 8 + EMBED_DIM: 512 + NUM_HEADS: 16 + DROPOUT: 0.0 + DROPPATH: 0.0 + ENCODER: + DEPTH: 12 + EMBED_DIM: 768 + NUM_HEADS: 12 + GLOBAL_POOL: False + MASK_RATIO: 0.75 + MLP_RATIO: 4.0 + NAME: vit_base_patch16_224 + NORM_PIX_LOSS: True + NUM_CLASSES: 1000 + PATCH_SIZE: 16 + PRETRAINED: ./mae_pretrain_vit_base.pdparams + QKV_BIAS: True + RESUME: None + TYPE: LINEARPROBE +REPORT_FREQ: 20 +SAVE: ./output/linearprobe-20220323-17-11 +SAVE_FREQ: 10 +SEED: 0 +TRAIN: + ACCUM_ITER: 4 + AUTO_AUGMENT: False + BASE_LR: 0.1 + COLOR_JITTER: 0.4 + CUTMIX_ALPHA: 1.0 + CUTMIX_MINMAX: None + END_LR: 0.0 + GRAD_CLIP: None + LAST_EPOCH: 0 + LAYER_DECAY: None + LINEAR_SCALED_LR: 256 + MIXUP_ALPHA: 0.8 + MIXUP_MODE: batch + MIXUP_PROB: 1.0 + MIXUP_SWITCH_PROB: 0.5 + NUM_EPOCHS: 90 + OPTIMIZER: + BETAS: (0.9, 0.95) + EPS: 1e-08 + NAME: LARS + RANDOM_ERASE_COUNT: 1 + RANDOM_ERASE_MODE: pixel + RANDOM_ERASE_PROB: 0.25 + RANDOM_ERASE_SPLIT: False + RAND_AUGMENT: True + RAND_AUGMENT_LAYERS: 2 + RAND_AUGMENT_MAGNITUDE: 9 + SMOOTHING: 0.1 + WARMUP_EPOCHS: 10 + WARMUP_START_LR: 0.0 + WEIGHT_DECAY: 0.0 +VALIDATE_FREQ: 1 +W0323 17:11:06.116573 6911 gpu_context.cc:240] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.2, 
Runtime API Version: 10.2
+W0323 17:11:06.121726 6911 gpu_context.cc:268] device: 0, cuDNN Version: 7.6.
+encoder_position_embedding [1, 197, 768] True
+cls_token [1, 1, 768] True
+patch_embedding.patch_embedding.weight [768, 3, 16, 16] True
+patch_embedding.patch_embedding.bias [768] True
+encoder.layers.0.attn_norm.weight [768] True
+encoder.layers.0.attn_norm.bias [768] True
+encoder.layers.0.attn.qkv.weight [768, 2304] True
+encoder.layers.0.attn.qkv.bias [2304] True
+encoder.layers.0.attn.out.weight [768, 768] True
+encoder.layers.0.attn.out.bias [768] True
+encoder.layers.0.mlp_norm.weight [768] True
+encoder.layers.0.mlp_norm.bias [768] True
+encoder.layers.0.mlp.fc1.weight [768, 3072] True
+encoder.layers.0.mlp.fc1.bias [3072] True
+encoder.layers.0.mlp.fc2.weight [3072, 768] True
+encoder.layers.0.mlp.fc2.bias [768] True
[encoder.layers.1 through encoder.layers.11 repeat the same parameter names, shapes, and flags as encoder.layers.0]
+encoder.norm.weight [768] True
+encoder.norm.bias [768] True
+classifier.0.weight [768] False
+classifier.0.bias [768] False
+classifier.0._mean [768] False
+classifier.0._variance [768] False
+classifier.1.weight [768, 1000] False
+classifier.1.bias [1000] False
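The True/False column printed after each shape is presumably Paddle's stop_gradient flag, so the listing says the ViT-Base encoder is frozen and only the BatchNorm + Linear classifier head receives gradients, which is what linear probing requires. The minimal sketch below shows how such a split is typically expressed in PaddlePaddle; it is illustrative only (the attribute name `classifier` is taken from the listing above, the function name is made up, and this is not the repo's actual code).

```python
import paddle.nn as nn

def freeze_backbone_for_linear_probe(model: nn.Layer) -> None:
    # In Paddle, stop_gradient=True means the parameter is frozen (no gradient).
    for _, param in model.named_parameters():
        param.stop_gradient = True
    # Unfreeze only the classifier head (BatchNorm1D + Linear in the listing above).
    for _, param in model.classifier.named_parameters():
        param.stop_gradient = False
```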
+server not ready, wait 3 sec to retry...
+not ready endpoints:['127.0.0.1:56293', '127.0.0.1:32775', '127.0.0.1:34540', '127.0.0.1:39983', '127.0.0.1:56764', '127.0.0.1:16222']
+I0323 17:11:19.512193 6911 nccl_context.cc:82] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 0
+I0323 17:11:22.297502 6911 nccl_context.cc:114] init nccl context nranks: 8 local rank: 0 gpu id: 0 ring id: 10
+2022-03-23 17:11:23,813-INFO: [topology.py:169:__init__] HybridParallelInfo: rank_id: 0, mp_degree: 1, sharding_degree: 1, pp_degree: 1, dp_degree: 8, mp_group: [0], sharding_group: [0], pp_group: [0], dp_group: [0, 1, 2, 3, 4, 5, 6, 7], check/clip group: [0]
+2022-03-23 17:11:23,815 MASTER_LOG ----- Total # of train batch (single gpu): 312
+2022-03-23 17:11:23,816 MASTER_LOG ----- Total # of val batch (single gpu): 13
+2022-03-23 17:11:23,816 MASTER_LOG Base lr is scaled to: 6.4
+2022-03-23 17:11:24,992 MASTER_LOG ----- Pretrained: Load model state from ./mae_pretrain_vit_base.pdparams
+2022-03-23 17:11:25,028 MASTER_LOG ----- Start training from epoch 1.
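The scaled peak LR of 6.4 and the per-step LR values logged below are consistent with a linear warmup followed by per-step cosine decay; 6.4 itself is most likely the usual linear scaling rule (base LR times effective batch size / 256), though the base LR and batch size are not shown in this log. The sketch below reproduces the logged values approximately; the warmup length and end LR are inferred from the log, not read from the config, so treat every constant as an assumption.

```python
import math

# Assumed/illustrative values: only peak_lr (6.4), steps_per_epoch (312) and the
# 90-epoch length appear in the log above; warmup length and end LR are inferred.
peak_lr = 6.4          # "Base lr is scaled to: 6.4"
end_lr = 0.0           # assumed
warmup_epochs = 10     # LR reaches 6.4 at Epoch[011] Step[0000] in the log below
total_epochs = 90
steps_per_epoch = 312  # "Total # of train batch (single gpu): 312"

def lr_at(epoch, step):
    """Scheduled LR; epoch is 1-based and step is 0-based, as in the log."""
    t = (epoch - 1) + step / steps_per_epoch       # training progress in epochs
    if t < warmup_epochs:                          # linear warmup from 0 to peak_lr
        return peak_lr * t / warmup_epochs
    frac = (t - warmup_epochs) / (total_epochs - warmup_epochs)
    return end_lr + 0.5 * (peak_lr - end_lr) * (1.0 + math.cos(math.pi * frac))

print(lr_at(1, 20))    # ~4.1026e-02, matches Epoch[001] Step[0020]
print(lr_at(11, 0))    # 6.4, matches Epoch[011] Step[0000]
print(lr_at(12, 311))  # ~6.390, close to the 6.390261e+00 logged at Epoch[012] Step[0311]
```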
+2022-03-23 17:11:25,028 MASTER_LOG Train epoch 1. LR=6.400000e-01
[per-step training and validation logs for epochs 001-022 elided; the LR warms up linearly from 0 to 6.4 over epochs 001-010 and then follows a cosine decay; per-epoch summaries:]
+2022-03-23 17:23:53,861 MASTER_LOG ----- Epoch[001/090], Lr: 6.317949e-01, time: 748.83Train Loss: 4.2076, Train Acc: 0.2901
+2022-03-23 17:24:38,941 MASTER_LOG ----- Epoch[001/090], Validation Loss: 2.3240, Validation Acc@1: 0.5098, Validation Acc@5: 0.7485, time: 45.08
+2022-03-23 17:37:04,586 MASTER_LOG ----- Epoch[002/090], Lr: 1.271795e+00, time: 744.91Train Loss: 2.3334, Train Acc: 0.5119
+2022-03-23 17:37:49,147 MASTER_LOG ----- Epoch[002/090], Validation Loss: 1.8723, Validation Acc@1: 0.5792, Validation Acc@5: 0.8077, time: 44.56
+2022-03-23 17:50:21,824 MASTER_LOG ----- Epoch[003/090], Lr: 1.911795e+00, time: 752.63Train Loss: 2.0920, Train Acc: 0.5479
+2022-03-23 17:51:05,538 MASTER_LOG ----- Epoch[003/090], Validation Loss: 1.7411, Validation Acc@1: 0.5952, Validation Acc@5: 0.8243, time: 43.71
+2022-03-23 18:03:30,373 MASTER_LOG ----- Epoch[004/090], Lr: 2.551795e+00, time: 744.83Train Loss: 2.0073, Train Acc: 0.5604
+2022-03-23 18:04:14,236 MASTER_LOG ----- Epoch[004/090], Validation Loss: 1.6943, Validation Acc@1: 0.6047, Validation Acc@5: 0.8297, time: 43.86
+2022-03-23 18:16:38,181 MASTER_LOG ----- Epoch[005/090], Lr: 3.191795e+00, time: 742.88Train Loss: 1.9751, Train Acc: 0.5656
+2022-03-23 18:17:21,776 MASTER_LOG ----- Epoch[005/090], Validation Loss: 1.6810, Validation Acc@1: 0.6075, Validation Acc@5: 0.8324, time: 43.59
+2022-03-23 18:30:08,052 MASTER_LOG ----- Epoch[006/090], Lr: 3.831795e+00, time: 766.27Train Loss: 1.9715, Train Acc: 0.5667
+2022-03-23 18:30:52,336 MASTER_LOG ----- Epoch[006/090], Validation Loss: 1.6683, Validation Acc@1: 0.6104, Validation Acc@5: 0.8349, time: 44.28
+2022-03-23 18:43:18,985 MASTER_LOG ----- Epoch[007/090], Lr: 4.471795e+00, time: 746.64Train Loss: 1.9670, Train Acc: 0.5694
+2022-03-23 18:44:03,968 MASTER_LOG ----- Epoch[007/090], Validation Loss: 1.6847, Validation Acc@1: 0.6066, Validation Acc@5: 0.8333, time: 44.98
+2022-03-23 18:56:41,882 MASTER_LOG ----- Epoch[008/090], Lr: 5.111795e+00, time: 757.85Train Loss: 1.9679, Train Acc: 0.5697
+2022-03-23 18:57:27,581 MASTER_LOG ----- Epoch[008/090], Validation Loss: 1.6786, Validation Acc@1: 0.6125, Validation Acc@5: 0.8323, time: 45.70
+2022-03-23 19:10:08,932 MASTER_LOG ----- Epoch[009/090], Lr: 5.751795e+00, time: 761.35Train Loss: 1.9745, Train Acc: 0.5696
+2022-03-23 19:10:54,339 MASTER_LOG ----- Epoch[009/090], Validation Loss: 1.6816, Validation Acc@1: 0.6104, Validation Acc@5: 0.8350, time: 45.40
+2022-03-23 19:23:27,854 MASTER_LOG ----- Epoch[010/090], Lr: 6.391795e+00, time: 752.45Train Loss: 1.9748, Train Acc: 0.5699
+2022-03-23 19:24:12,907 MASTER_LOG ----- Epoch[010/090], Validation Loss: 1.6874, Validation Acc@1: 0.6084, Validation Acc@5: 0.8349, time: 45.05
+2022-03-23 19:24:13,842 MASTER_LOG ----- Save model: ./output/linearprobe-20220323-17-11/LINEARPROBE-Epoch-10-Loss-1.6873605159378051.pdparams
+2022-03-23 19:36:44,986 MASTER_LOG ----- Epoch[011/090], Lr: 6.397596e+00, time: 750.56Train Loss: 1.9486, Train Acc: 0.5741
+2022-03-23 19:37:29,323 MASTER_LOG ----- Epoch[011/090], Validation Loss: 1.6456, Validation Acc@1: 0.6165, Validation Acc@5: 0.8388, time: 44.33
+2022-03-23 19:50:03,042 MASTER_LOG ----- Epoch[012/090], Lr: 6.390261e+00, time: 753.65Train Loss: 1.9118, Train Acc: 0.5802
+2022-03-23 19:50:48,825 MASTER_LOG ----- Epoch[012/090], Validation Loss: 1.6169, Validation Acc@1: 0.6248, Validation Acc@5: 0.8394, time: 45.78
+2022-03-23 20:03:20,292 MASTER_LOG ----- Epoch[013/090], Lr: 6.378008e+00, time: 751.46Train Loss: 1.8773, Train Acc: 0.5856
+2022-03-23 20:04:04,209 MASTER_LOG ----- Epoch[013/090], Validation Loss: 1.5961, Validation Acc@1: 0.6255, Validation Acc@5: 0.8436, time: 43.91
+2022-03-23 20:16:40,197 MASTER_LOG ----- Epoch[014/090], Lr: 6.360854e+00, time: 755.96Train Loss: 1.8512, Train Acc: 0.5905
+2022-03-23 20:17:25,390 MASTER_LOG ----- Epoch[014/090], Validation Loss: 1.5789, Validation Acc@1: 0.6288, Validation Acc@5: 0.8470, time: 45.19
+2022-03-23 20:30:00,079 MASTER_LOG ----- Epoch[015/090], Lr: 6.338827e+00, time: 754.62Train Loss: 1.8311, Train Acc: 0.5940
+2022-03-23 20:30:44,103 MASTER_LOG ----- Epoch[015/090], Validation Loss: 1.5601, Validation Acc@1: 0.6327, Validation Acc@5: 0.8483, time: 44.02
+2022-03-23 20:43:16,076 MASTER_LOG ----- Epoch[016/090], Lr: 6.311959e+00, time: 751.93Train Loss: 1.8135, Train Acc: 0.5969
+2022-03-23 20:43:59,423 MASTER_LOG ----- Epoch[016/090], Validation Loss: 1.5559, Validation Acc@1: 0.6336, Validation Acc@5: 0.8491, time: 43.34
+2022-03-23 20:56:28,174 MASTER_LOG ----- Epoch[017/090], Lr: 6.280294e+00, time: 747.51Train Loss: 1.8015, Train Acc: 0.5990
+2022-03-23 20:57:13,141 MASTER_LOG ----- Epoch[017/090], Validation Loss: 1.5399, Validation Acc@1: 0.6373, Validation Acc@5: 0.8508, time: 44.96
+2022-03-23 21:09:45,436 MASTER_LOG ----- Epoch[018/090], Lr: 6.243878e+00, time: 752.29Train Loss: 1.7884, Train Acc: 0.6013
+2022-03-23 21:10:30,310 MASTER_LOG ----- Epoch[018/090], Validation Loss: 1.5351, Validation Acc@1: 0.6352, Validation Acc@5: 0.8516, time: 44.87
+2022-03-23 21:22:57,972 MASTER_LOG ----- Epoch[019/090], Lr: 6.202770e+00, time: 747.55Train Loss: 1.7794, Train Acc: 0.6031
+2022-03-23 21:23:42,546 MASTER_LOG ----- Epoch[019/090], Validation Loss: 1.5277, Validation Acc@1: 0.6365, Validation Acc@5: 0.8527, time: 44.57
+2022-03-23 21:36:12,200 MASTER_LOG ----- Epoch[020/090], Lr: 6.157031e+00, time: 749.60Train Loss: 1.7706, Train Acc: 0.6052
+2022-03-23 21:36:58,563 MASTER_LOG ----- Epoch[020/090], Validation Loss: 1.5293, Validation Acc@1: 0.6387, Validation Acc@5: 0.8520, time: 46.36
+2022-03-23 21:36:59,485 MASTER_LOG ----- Save model: ./output/linearprobe-20220323-17-11/LINEARPROBE-Epoch-20-Loss-1.5293068207359315.pdparams
+2022-03-23 21:49:36,654 MASTER_LOG ----- Epoch[021/090], Lr: 6.106732e+00, time: 757.05Train Loss: 1.7625, Train Acc: 0.6062
+2022-03-23 21:50:21,707 MASTER_LOG ----- Epoch[021/090], Validation Loss: 1.5158, Validation Acc@1: 0.6399, Validation Acc@5: 0.8543, time: 45.05
+2022-03-23 22:02:44,536 MASTER_LOG ----- Epoch[022/090], Lr: 6.051952e+00, time: 742.76Train Loss: 1.7567, Train Acc: 0.6079
+2022-03-23 22:03:27,869 MASTER_LOG ----- Epoch[022/090], Validation Loss: 1.5087, Validation Acc@1: 0.6450, Validation Acc@5: 0.8553, time: 43.33
+2022-03-23 22:03:27,869 MASTER_LOG Train epoch 23.
LR=6.051952e+00 +2022-03-23 22:03:35,672 MASTER_LOG Epoch[023/090], Step[0000/0312], Lr: 6.051221e+00, Loss: 1.6996 (1.6996), Avg Acc: 0.6177 +2022-03-23 22:04:24,975 MASTER_LOG Epoch[023/090], Step[0020/0312], Lr: 6.047555e+00, Loss: 1.7716 (1.7292), Avg Acc: 0.6129 +2022-03-23 22:05:13,528 MASTER_LOG Epoch[023/090], Step[0040/0312], Lr: 6.043871e+00, Loss: 1.7322 (1.7258), Avg Acc: 0.6134 +2022-03-23 22:06:01,375 MASTER_LOG Epoch[023/090], Step[0060/0312], Lr: 6.040168e+00, Loss: 1.7378 (1.7269), Avg Acc: 0.6128 +2022-03-23 22:06:49,569 MASTER_LOG Epoch[023/090], Step[0080/0312], Lr: 6.036448e+00, Loss: 1.7751 (1.7292), Avg Acc: 0.6118 +2022-03-23 22:07:36,929 MASTER_LOG Epoch[023/090], Step[0100/0312], Lr: 6.032710e+00, Loss: 1.7473 (1.7328), Avg Acc: 0.6111 +2022-03-23 22:08:23,681 MASTER_LOG Epoch[023/090], Step[0120/0312], Lr: 6.028954e+00, Loss: 1.7562 (1.7354), Avg Acc: 0.6108 +2022-03-23 22:09:10,485 MASTER_LOG Epoch[023/090], Step[0140/0312], Lr: 6.025180e+00, Loss: 1.7505 (1.7353), Avg Acc: 0.6106 +2022-03-23 22:09:57,172 MASTER_LOG Epoch[023/090], Step[0160/0312], Lr: 6.021388e+00, Loss: 1.7875 (1.7380), Avg Acc: 0.6104 +/usr/local/lib/python3.7/site-packages/paddlenlp/transformers/funnel/modeling.py:30: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working + from collections import Iterable +Compose( + + + + + + +) +----- Imagenet2012 train_list.txt len = 1281167 +----- Imagenet2012 val_list.txt len = 50000 +2022-03-23 22:10:37,296 MASTER_LOG ----- world_size = 8, local_rank = 0 +----- AMP: True +BASE: [''] +DATA: + BATCH_SIZE: 2 + BATCH_SIZE_EVAL: 2 + CROP_PCT: 0.875 + DATASET: imagenet2012 + DATA_PATH: /dataset/imagenet + IMAGENET_MEAN: [0.485, 0.456, 0.406] + IMAGENET_STD: [0.229, 0.224, 0.225] + IMAGE_CHANNELS: 3 + IMAGE_SIZE: 224 + NUM_WORKERS: 2 +EVAL: False +MODEL: + ATTENTION_DROPOUT: 0.0 + DECODER: + DEPTH: 8 + EMBED_DIM: 512 + NUM_HEADS: 16 + DROPOUT: 0.0 + DROPPATH: 0.1 + ENCODER: + DEPTH: 12 + EMBED_DIM: 768 + NUM_HEADS: 12 + GLOBAL_POOL: True + MASK_RATIO: 0.75 + MLP_RATIO: 4.0 + NAME: vit_base_patch16_224 + NORM_PIX_LOSS: True + NUM_CLASSES: 1000 + PATCH_SIZE: 16 + PRETRAINED: ./mae_pretrain_vit_base.pdparams + QKV_BIAS: True + RESUME: None + TYPE: FINETUNE +REPORT_FREQ: 100 +SAVE: ./output/finetune-20220323-22-10 +SAVE_FREQ: 10 +SEED: 0 +TRAIN: + ACCUM_ITER: 4 + AUTO_AUGMENT: False + BASE_LR: 0.0005 + COLOR_JITTER: 0.4 + CUTMIX_ALPHA: 1.0 + CUTMIX_MINMAX: None + END_LR: 1e-06 + GRAD_CLIP: None + LAST_EPOCH: 0 + LAYER_DECAY: 0.65 + LINEAR_SCALED_LR: 256 + MIXUP_ALPHA: 0.8 + MIXUP_MODE: batch + MIXUP_PROB: 1.0 + MIXUP_SWITCH_PROB: 0.5 + NUM_EPOCHS: 100 + OPTIMIZER: + BETAS: (0.9, 0.999) + EPS: 1e-08 + NAME: AdamWDL + RANDOM_ERASE_COUNT: 1 + RANDOM_ERASE_MODE: pixel + RANDOM_ERASE_PROB: 0.25 + RANDOM_ERASE_SPLIT: False + RAND_AUGMENT: True + RAND_AUGMENT_LAYERS: 2 + RAND_AUGMENT_MAGNITUDE: 9 + SMOOTHING: 0.1 + WARMUP_EPOCHS: 5 + WARMUP_START_LR: 0.0 + WEIGHT_DECAY: 0.05 +VALIDATE_FREQ: 1 +2022-03-23 22:10:37,296 MASTER_LOG ----- world_size = 8, local_rank = 0 +----- AMP: True +BASE: [''] +DATA: + BATCH_SIZE: 2 + BATCH_SIZE_EVAL: 2 + CROP_PCT: 0.875 + DATASET: imagenet2012 + DATA_PATH: /dataset/imagenet + IMAGENET_MEAN: [0.485, 0.456, 0.406] + IMAGENET_STD: [0.229, 0.224, 0.225] + IMAGE_CHANNELS: 3 + IMAGE_SIZE: 224 + NUM_WORKERS: 2 +EVAL: False +MODEL: + ATTENTION_DROPOUT: 0.0 + DECODER: + DEPTH: 8 + EMBED_DIM: 512 + NUM_HEADS: 16 + DROPOUT: 0.0 + DROPPATH: 0.1 + ENCODER: 
+    DEPTH: 12
+    EMBED_DIM: 768
+    NUM_HEADS: 12
+  GLOBAL_POOL: True
+  MASK_RATIO: 0.75
+  MLP_RATIO: 4.0
+  NAME: vit_base_patch16_224
+  NORM_PIX_LOSS: True
+  NUM_CLASSES: 1000
+  PATCH_SIZE: 16
+  PRETRAINED: ./mae_pretrain_vit_base.pdparams
+  QKV_BIAS: True
+  RESUME: None
+  TYPE: FINETUNE
+REPORT_FREQ: 100
+SAVE: ./output/finetune-20220323-22-10
+SAVE_FREQ: 10
+SEED: 0
+TRAIN:
+  ACCUM_ITER: 4
+  AUTO_AUGMENT: False
+  BASE_LR: 0.0005
+  COLOR_JITTER: 0.4
+  CUTMIX_ALPHA: 1.0
+  CUTMIX_MINMAX: None
+  END_LR: 1e-06
+  GRAD_CLIP: None
+  LAST_EPOCH: 0
+  LAYER_DECAY: 0.65
+  LINEAR_SCALED_LR: 256
diff --git a/image_classification/MAE/pos_embed.py b/self_supervised_learning/MAE/pos_embed.py
similarity index 100%
rename from image_classification/MAE/pos_embed.py
rename to self_supervised_learning/MAE/pos_embed.py
diff --git a/image_classification/MAE/random_erasing.py b/self_supervised_learning/MAE/random_erasing.py
similarity index 100%
rename from image_classification/MAE/random_erasing.py
rename to self_supervised_learning/MAE/random_erasing.py
diff --git a/self_supervised_learning/MAE/run_finetune_vit_b.sh b/self_supervised_learning/MAE/run_finetune_vit_b.sh
new file mode 100644
index 00000000..ecc1fc10
--- /dev/null
+++ b/self_supervised_learning/MAE/run_finetune_vit_b.sh
@@ -0,0 +1,7 @@
+python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_finetune.py \
+-cfg='./configs/vit_base_patch16_224_finetune.yaml' \
+-dataset='imagenet2012' \
+-batch_size=32 \
+-data_path='/dataset/imagenet' \
+-pretrained='./mae_pretrain_vit_base.pdparams' \
+-amp \
diff --git a/self_supervised_learning/MAE/run_finetune_vit_b_1node.sh b/self_supervised_learning/MAE/run_finetune_vit_b_1node.sh
new file mode 100644
index 00000000..d3fd78ad
--- /dev/null
+++ b/self_supervised_learning/MAE/run_finetune_vit_b_1node.sh
@@ -0,0 +1,8 @@
+python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_finetune.py \
+-cfg='./configs/vit_base_patch16_224_finetune.yaml' \
+-dataset='imagenet2012' \
+-batch_size=2 \
+-data_path='/dataset/imagenet' \
+-pretrained='./mae_pretrain_vit_base.pdparams' \
+-accum_iter=4 \
+-amp \
diff --git a/image_classification/MAE/run_linearprobe_vit_b_single_node.sh b/self_supervised_learning/MAE/run_linearprobe_vit_b.sh
similarity index 100%
rename from image_classification/MAE/run_linearprobe_vit_b_single_node.sh
rename to self_supervised_learning/MAE/run_linearprobe_vit_b.sh
diff --git a/image_classification/MAE/run_linearprobe_vit_b.sh b/self_supervised_learning/MAE/run_linearprobe_vit_b_1node.sh
similarity index 64%
rename from image_classification/MAE/run_linearprobe_vit_b.sh
rename to self_supervised_learning/MAE/run_linearprobe_vit_b_1node.sh
index e954f863..6b07f906 100644
--- a/image_classification/MAE/run_linearprobe_vit_b.sh
+++ b/self_supervised_learning/MAE/run_linearprobe_vit_b_1node.sh
@@ -1,10 +1,9 @@
 #CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
 #python main_multi_gpu_linearprobe.py \
 GLOG_v=0 python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" main_multi_gpu_linearprobe.py \
--cfg='./configs/vit_base_patch16_224_linearprobe.yaml' \
+-cfg='./configs/vit_base_patch16_224_linearprobe_single_node.yaml' \
 -dataset='imagenet2012' \
 -batch_size=512 \
--accum_iter=2 \ # orriginal effective batch_size = 512bs * 4nodes * 8gpus. So for 2 node, accum_iter should be 2
 -data_path='/dataset/imagenet' \
 -pretrained='./mae_pretrain_vit_base.pdparams' \
 -amp \
diff --git a/image_classification/MAE/run_pretrain_vit_b.sh b/self_supervised_learning/MAE/run_pretrain_vit_b.sh
similarity index 100%
rename from image_classification/MAE/run_pretrain_vit_b.sh
rename to self_supervised_learning/MAE/run_pretrain_vit_b.sh
diff --git a/image_classification/MAE/transformer.py b/self_supervised_learning/MAE/transformer.py
similarity index 94%
rename from image_classification/MAE/transformer.py
rename to self_supervised_learning/MAE/transformer.py
index 694e76bd..1286bc77 100644
--- a/image_classification/MAE/transformer.py
+++ b/self_supervised_learning/MAE/transformer.py
@@ -71,12 +71,22 @@ def __init__(self,
                  dropout=0.):
         super().__init__()
         self.n_patches = (image_size // patch_size) * (image_size // patch_size)
+        w_attr_1, b_attr_1 = self._init_weights()
         self.patch_embedding = nn.Conv2D(in_channels=in_channels,
                                          out_channels=embed_dim,
                                          kernel_size=patch_size,
-                                         stride=patch_size)
+                                         stride=patch_size,
+                                         weight_attr=w_attr_1,
+                                         bias_attr=b_attr_1)
         self.dropout = nn.Dropout(dropout)

+    def _init_weights(self):
+        weight_attr = paddle.ParamAttr(
+            initializer=nn.initializer.XavierUniform())  # MAE
+        bias_attr = paddle.ParamAttr(
+            initializer=nn.initializer.Constant(0.0))
+        return weight_attr, bias_attr
+
     def forward(self, x):
         x = self.patch_embedding(x)
         x = x.flatten(2)
@@ -149,8 +159,8 @@ def forward(self, x):
         qkv = self.qkv(x).chunk(3, axis=-1)
         q, k, v = map(self.transpose_multihead, qkv)

+        q = q * self.scales
         attn = paddle.matmul(q, k, transpose_y=True)
-        attn = attn * self.scales
         attn = self.softmax(attn)
         attn = self.attn_dropout(attn)
@@ -734,13 +744,12 @@ def _init_weights_norm(self):
         return weight_attr, bias_attr

     def _init_weights_linear(self):
-        weight_attr = paddle.ParamAttr(initializer=nn.initializer.XavierUniform())  # MAE
+        weight_attr = paddle.ParamAttr(initializer=nn.initializer.TruncatedNormal(std=0.02))  # MAE linearprobe
         bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0))
         return weight_attr, bias_attr

     def _init_weights_classifier(self):
-        #weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=2e-5))
-        weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0))
+        weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=0.01))
         bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0))
         return weight_attr, bias_attr
@@ -748,34 +757,34 @@ def _init_weights_classifier(self):

 def build_mae_pretrain(config):
     """ build MAE vit model for pretraining"""
     model = MAEPretrainTransformer(image_size=config.DATA.IMAGE_SIZE,
-                                   patch_size=config.MODEL.TRANS.PATCH_SIZE,
-                                   in_channels=3,
-                                   encoder_embed_dim=config.MODEL.TRANS.ENCODER.EMBED_DIM,
-                                   decoder_embed_dim=config.MODEL.TRANS.DECODER.EMBED_DIM,
-                                   encoder_depth=config.MODEL.TRANS.ENCODER.DEPTH,
-                                   decoder_depth=config.MODEL.TRANS.DECODER.DEPTH,
-                                   encoder_num_heads=config.MODEL.TRANS.ENCODER.NUM_HEADS,
-                                   decoder_num_heads=config.MODEL.TRANS.DECODER.NUM_HEADS,
-                                   mlp_ratio=config.MODEL.TRANS.MLP_RATIO,
-                                   qkv_bias=config.MODEL.TRANS.QKV_BIAS,
+                                   patch_size=config.MODEL.PATCH_SIZE,
+                                   in_channels=config.DATA.IMAGE_CHANNELS,
+                                   encoder_embed_dim=config.MODEL.ENCODER.EMBED_DIM,
+                                   decoder_embed_dim=config.MODEL.DECODER.EMBED_DIM,
+                                   encoder_depth=config.MODEL.ENCODER.DEPTH,
+                                   decoder_depth=config.MODEL.DECODER.DEPTH,
+                                   encoder_num_heads=config.MODEL.ENCODER.NUM_HEADS,
+                                   decoder_num_heads=config.MODEL.DECODER.NUM_HEADS,
+                                   mlp_ratio=config.MODEL.MLP_RATIO,
+                                   qkv_bias=config.MODEL.QKV_BIAS,
                                    dropout=config.MODEL.DROPOUT,
                                    attention_dropout=config.MODEL.ATTENTION_DROPOUT,
                                    droppath=config.MODEL.DROPPATH,
-                                   norm_pix_loss=config.MODEL.TRANS.NORM_PIX_LOSS)
+                                   norm_pix_loss=config.MODEL.NORM_PIX_LOSS)
     return model

 def build_transformer(config):
     """ build vit model for finetuning and linear probing"""
     model = MAETransformer(image_size=config.DATA.IMAGE_SIZE,
-                           patch_size=config.MODEL.TRANS.PATCH_SIZE,
-                           in_channels=3,
+                           patch_size=config.MODEL.PATCH_SIZE,
+                           in_channels=config.DATA.IMAGE_CHANNELS,
                            num_classes=config.MODEL.NUM_CLASSES,
-                           embed_dim=config.MODEL.TRANS.ENCODER.EMBED_DIM,
-                           depth=config.MODEL.TRANS.ENCODER.DEPTH,
-                           num_heads=config.MODEL.TRANS.ENCODER.NUM_HEADS,
-                           mlp_ratio=config.MODEL.TRANS.MLP_RATIO,
-                           qkv_bias=config.MODEL.TRANS.QKV_BIAS,
+                           embed_dim=config.MODEL.ENCODER.EMBED_DIM,
+                           depth=config.MODEL.ENCODER.DEPTH,
+                           num_heads=config.MODEL.ENCODER.NUM_HEADS,
+                           mlp_ratio=config.MODEL.MLP_RATIO,
+                           qkv_bias=config.MODEL.QKV_BIAS,
                            global_pool=config.MODEL.GLOBAL_POOL,
                            dropout=config.MODEL.DROPOUT,
                            attention_dropout=config.MODEL.ATTENTION_DROPOUT,
diff --git a/image_classification/MAE/utils.py b/self_supervised_learning/MAE/utils.py
similarity index 61%
rename from image_classification/MAE/utils.py
rename to self_supervised_learning/MAE/utils.py
index 10946200..3636b954 100644
--- a/image_classification/MAE/utils.py
+++ b/self_supervised_learning/MAE/utils.py
@@ -19,11 +19,101 @@
 """

-import math
-import numpy as np
+import logging
+import sys
+import os
 import paddle
 from paddle.optimizer.lr import LRScheduler

+
+def get_logger(file_path):
+    """Set logging file and format, logs are written in 2 loggers, one local_logger records
+        the information on its own gpu/process, one master_logger records the overall/average
+        information over all gpus/processes.
+    Args:
+        file_path: str, folder path of the logger files to write
+    Return:
+        local_logger: python logger for each process
+        master_logger: python logger for overall processes (on node 0)
+    """
+    local_rank = paddle.distributed.get_rank()
+    filename = os.path.join(file_path, 'log_all.txt')
+    log_format = "%(asctime)s %(message)s"
+    logging.basicConfig(filename=filename, level=logging.INFO,
+                        format=log_format, datefmt="%m%d %I:%M:%S %p")
+
+    # local_logger for each process/GPU
+    local_logger = logging.getLogger(f'local_{local_rank}')
+    filename = os.path.join(file_path, f'log_{local_rank}.txt')
+    fh = logging.FileHandler(filename)
+    fh.setFormatter(logging.Formatter(log_format))
+    local_logger.addHandler(fh)
+
+    # master_logger records avg performance and general message
+    if local_rank == 0:
+        master_logger = logging.getLogger('master')
+        # log.txt
+        filename = os.path.join(file_path, 'log.txt')
+        fh = logging.FileHandler(filename)
+        fh.setFormatter(logging.Formatter(log_format))
+        master_logger.addHandler(fh)
+        # console (stdout)
+        sh_1 = logging.StreamHandler(sys.stdout)
+        sh_1.setFormatter(logging.Formatter(log_format))
+        master_logger.addHandler(sh_1)
+        # console (stderr)
+        sh_2 = logging.StreamHandler(sys.stderr)
+        sh_2.setFormatter(logging.Formatter(log_format))
+        master_logger.addHandler(sh_2)
+    else:
+        master_logger = None
+    return local_logger, master_logger
+
+
+def write_log(local_logger, master_logger, msg_local, msg_master=None, level='info'):
+    """Write messages in loggers
+    Args:
+        local_logger: python logger, logs information on single gpu
+        master_logger: python logger, logs information over all gpus
+        msg_local: str, message to log on local_logger
+        msg_master: str, message to log on master_logger, if None, use msg_local, default: None
+        level: str, log level, in ['info', 'warning', 'fatal'], default: 'info'
+    """
+    # write log to local logger
+    if local_logger:
+        if level == 'info':
+            local_logger.info(msg_local)
+        elif level == 'warning':
+            local_logger.warning(msg_local)
+        elif level == 'fatal':
+            local_logger.fatal(msg_local)
+        else:
+            raise ValueError("level must be in ['info', 'warning', 'fatal']")
+    # write log to master logger on node 0
+    if master_logger and paddle.distributed.get_rank() == 0:
+        if msg_master is None:
+            msg_master = msg_local
+        if level == 'info':
+            master_logger.info("MASTER_LOG " + msg_master)
+        elif level == 'warning':
+            master_logger.warning("MASTER_LOG " + msg_master)
+        elif level == 'fatal':
+            master_logger.fatal("MASTER_LOG " + msg_master)
+        else:
+            raise ValueError("level must be in ['info', 'warning', 'fatal']")
+
+
+def all_reduce_mean(x):
+    """perform all_reduce on Tensor for gathering results from multi-gpus"""
+    world_size = paddle.distributed.get_world_size()
+    if world_size > 1:
+        x_reduce = paddle.to_tensor(x)
+        paddle.distributed.all_reduce(x_reduce)
+        x_reduce = x_reduce / world_size
+        return x_reduce.item()
+    return x
+
+
 def get_params_groups(model, weight_decay=0.01):
     regularized = []
     not_regularized = []
@@ -93,38 +183,6 @@ def interpolate_pos_embed(model, state_dict, key_name='encoder_position_embeddin
         state_dict[key_name] = new_pos_embed
-
-#TODO: check correctness
-class LARS(paddle.optimizer.Optimizer):
-    """LARS optmizer"""
-    def __init__(self, params, learning_rate=0., weight_decay=0., momentum=0., trust_coefficient=0.001):
-        super().__init__(params, learning_rate=learning_rate, weight_decay=weight_decay)
-
-    @paddle.no_grad()
-    def step(self):
-        for g in self.param_groups:
-            for p in g['params']:
-                dp = p.grad
-                if dp is None:
-                    continue
-                if p.ndim > 1:
-                    dp = dp.add(p, alpha=g['weight_decay'])
-                param_norm = paddle.norm(p)
-                update_norm = paddle.norm(dp)
-                one = paddle.ones_list(param_norm)
-                q = paddle.where(param_norm >0.,
-                                 paddle.where(update_norm > 0,
-                                 (g['trust_coefficient'] * param_norm / update_norm),
-                                 one),
-                                 one)
-                dp = dp.mul(q)
-                param_state = self.state[p]
-                if 'mu' not in param_state:
-                    param_state['mu'] = paddle.zeros_like(p)
-                mu = param_state['mu']
-                mu.mul_(g['momentum']).add_(dp)
-                p.add_(mu, alpha=-g['lr'])
-
-
 class AverageMeter():
     """ Meter for monitoring losses"""
     def __init__(self):
@@ -146,8 +204,7 @@ def update(self, val, n=1):
         self.avg = self.sum / self.cnt


-
-def get_exclude_from_weight_decay_fn(exclude_list=[]):
+def skip_weight_decay_fn(model, skip_list=[], filter_bias_and_bn=True):
     """ Set params with no weight decay during the training
     For certain params, e.g., positional encoding in ViT, weight decay
@@ -155,18 +212,27 @@
     these params.
     Args:
-        exclude_list: a list of params names which need to exclude
-            from weight decay.
+        model: nn.Layer, model
+        skip_list: list, a list of params names which need to be excluded
+            from weight decay, default: []
+        filter_bias_and_bn: bool, set True to exclude bias and bn in model, default: True
     Returns:
         exclude_from_weight_decay_fn: a function returns True if param
             will be excluded from weight decay
     """
-    if len(exclude_list) == 0:
+    if len(skip_list) == 0 and not filter_bias_and_bn:
         exclude_from_weight_decay_fn = None
     else:
+        skip_list_all = []
+        for name, param in model.named_parameters():
+            if param.stop_gradient:
+                continue
+            if len(param.shape) == 1 or name.endswith('.bias') or name in skip_list:
+                skip_list_all.append(name)
+
         def exclude_fn(param):
-            for name in exclude_list:
-                if param.endswith(name):
+            for name in skip_list_all:
+                if param == name:
                     return False
             return True
         exclude_from_weight_decay_fn = exclude_fn
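
The helper above only builds the exclusion callback; the training scripts are expected to hand it to the optimizer. The following minimal sketch is not part of the patch: the toy model, the './output' folder, the skipped parameter name and the hyper-parameter values are placeholders, and it assumes the utils.py interface shown in this hunk together with Paddle's AdamW apply_decay_param_fun argument.

import os
import paddle
import paddle.nn as nn
from utils import get_logger, write_log, all_reduce_mean, skip_weight_decay_fn

# toy stand-in for the ViT returned by build_transformer()
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 224 * 224, 1000))

# exclude 1-D params (bias/norm) and any explicitly named params from weight decay;
# the returned callback yields True for names that should still receive decay
decay_fn = skip_weight_decay_fn(model, skip_list=['encoder_position_embedding'])

optimizer = paddle.optimizer.AdamW(
    learning_rate=1e-3,                  # placeholder value
    parameters=model.parameters(),
    weight_decay=0.05,
    apply_decay_param_fun=decay_fn)

os.makedirs('./output', exist_ok=True)
local_logger, master_logger = get_logger('./output')
avg_loss = all_reduce_mean(1.234)        # e.g. loss.item() from a train step
write_log(local_logger, master_logger, f"Avg Loss: {avg_loss:.4f}")

The "MASTER_LOG" prefix written by write_log is what produces the MASTER_LOG lines visible in the training logs earlier in this patch.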