# structured_config.py

from dataclasses import MISSING
from functools import partial
from typing import Optional
import hydra
from attrs import define, field
from hydra.core.config_store import ConfigStore


def check_input_against_list(attribute, value, valid_values):
    if value not in valid_values:
        raise ValueError(
            f"{value} is not a valid input for {attribute.name}. Valid inputs are {valid_values}"
        )
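

# Each ``@<field>.validator`` method below is invoked by attrs at initialization time
# with ``(self, attribute, value)`` and delegates to this helper, so constructing a
# ConfigSchema with an out-of-range value raises ValueError immediately.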
@define(auto_attribs=True)
class ConfigSchema:

    #############
    ## Prompts ##
    #############

    scenes: str = ""
    scene_prefix: str = ""
    scene_suffix: str = ""
    direct_image_prompts: str = ""
    init_image: str = ""
    direct_init_weight: str = ""
    semantic_init_weight: str = ""

    ##################################

    image_model: str = "VQGAN"
    vqgan_model: str = "sflckr"

    animation_mode: str = field(default="off")

    @animation_mode.validator
    def check(self, attribute, value):
        check_input_against_list(
            attribute, value, valid_values=["off", "2D", "3D", "Video Source"]
        )

    ##################################

    width: int = 180
    height: int = 112

    steps_per_scene: int = 100
    steps_per_frame: int = 50
    interpolation_steps: int = 0

    learning_rate: Optional[float] = None  # based on pytti.Image.DifferentiableImage
    reset_lr_each_frame: bool = True
    seed: str = "${now:%f}"  # microsecond component of timestamp. Basically random.

    cutouts: int = 40
    cut_pow: int = 2
    cutout_border: float = 0.25

    border_mode: str = field(default="clamp")

    @border_mode.validator
    def check(self, attribute, value):
        check_input_against_list(
            attribute, value, valid_values=["clamp", "mirror", "wrap", "black", "smear"]
        )

    ##################################

    ##########
    # Camera #
    ##########

    field_of_view: int = 60
    near_plane: int = 1
    far_plane: int = 10000

    ######################
    ### Induced Motion ###
    ######################

    input_audio: str = ""
    input_audio_offset: float = 0
    input_audio_window_size: int = 1024
    input_audio_band_split_low_medium: int = 500
    input_audio_band_split_medium_high: int = 3500

    # _2d and _3d only apply to those animation modes
    translate_x: str = "0"
    translate_y: str = "0"
    translate_z_3d: str = "0"
    rotate_3d: str = "[1, 0, 0, 0]"
    rotate_2d: str = "0"
    zoom_x_2d: str = "0"
    zoom_y_2d: str = "0"

    sampling_mode: str = field(default="bicubic")

    @sampling_mode.validator
    def check(self, attribute, value):
        check_input_against_list(
            attribute, value, valid_values=["nearest", "bilinear", "bicubic"]
        )

    infill_mode: str = field(default="wrap")

    @infill_mode.validator
    def check(self, attribute, value):
        check_input_against_list(
            attribute, value, valid_values=["mirror", "wrap", "black", "smear"]
        )

    pre_animation_steps: int = 100
    lock_camera: bool = True

    ##################################

    #######################
    ### Limited Palette ###
    #######################

    pixel_size: int = 4
    smoothing_weight: float = 0.02
    random_initial_palette: bool = False
    palette_size: int = 6
    palettes: int = 9
    gamma: int = 1
    hdr_weight: float = 0.01
    palette_normalization_weight: float = 0.2
    show_palette: bool = False
    target_palette: str = ""
    lock_palette: bool = False

    ##############
    ### ffmpeg ###
    ##############

    frames_per_second: int = 12

    direct_stabilization_weight: str = ""
    semantic_stabilization_weight: str = ""
    depth_stabilization_weight: str = ""
    edge_stabilization_weight: str = ""
    flow_stabilization_weight: str = ""

    #####################################
    ### animation_mode = Video Source ###
    #####################################

    video_path: str = ""
    frame_stride: int = 1
    reencode_each_frame: bool = True
    flow_long_term_samples: int = 1

    ############
    ### CLIP ###
    ############

    ViTB32: bool = True
    ViTB16: bool = False
    ViTL14: bool = False
    RN50: bool = False
    RN101: bool = False
    RN50x4: bool = False
    RN50x16: bool = False
    RN50x64: bool = False

    ###############
    ### Outputs ###
    ###############

    file_namespace: str = "default"
    allow_overwrite: bool = False
    display_every: int = 50
    clear_every: int = 0
    display_scale: int = 1
    save_every: int = 50
    backups: int = 0
    show_graphs: bool = False
    approximate_vram_usage: bool = False

    #####################################

    #################
    ### Model I/O ###
    #################

    # This is where pytti will expect to find model weights.
    # Each model will be assigned a separate subdirectory within this folder.
    # If the expected model artifacts are not present, pytti will attempt to download them.
    models_parent_dir: str = "${user_cache:}"
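    # Note: ``${user_cache:}`` (like ``${now:%f}`` above) is an OmegaConf-style
    # interpolation. The ``now`` resolver is provided by Hydra; ``user_cache`` is
    # presumably a custom resolver registered elsewhere in pytti-core and expanded
    # when the config is composed.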

    ######################################

    ##########################
    ### Performance tuning ###
    ##########################

    gradient_accumulation_steps: int = 1


def register():
    cs = ConfigStore.instance()
    cs.store(name="config_schema", node=ConfigSchema)


register()
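

# --- Hypothetical usage sketch (not part of pytti) -------------------------------
# A minimal illustration of how a Hydra app could consume the "config_schema" node
# registered above. The ``_print_config`` function and this __main__ guard are
# illustrative only; field names and the store name come from this file. CLI
# overrides such as ``animation_mode=3D width=256`` are type-checked against the
# schema by Hydra/OmegaConf.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    @hydra.main(config_name="config_schema")
    def _print_config(cfg) -> None:
        # ``cfg`` is a DictConfig composed from ConfigSchema plus any CLI overrides.
        # Interpolations like ``${now:%f}`` remain unresolved unless resolved explicitly.
        print(OmegaConf.to_yaml(cfg))

    _print_config()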