Skip to content

Commit

Permalink
offload: fixes for 4090 batch size
Browse files Browse the repository at this point in the history
  • Loading branch information
bghira committed Jul 16, 2023
1 parent 1d04d0e commit c733e0a
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 6 deletions.
6 changes: 5 additions & 1 deletion discord_tron_client/classes/app_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -282,4 +282,8 @@ def bark_subsystem_type(self):
def enable_compel(self):
    """Return True when Compel prompt weighting should be used (on by default)."""
    use_compel = self.config.get("use_compel_prompt_weighting", True)
    return use_compel
def enable_compile(self):
    """Return True when torch.compile should be applied (on by default).

    Reads the 'enable_torch_compile' key from the loaded config dict.
    Fix: the original contained a duplicated, unreachable second
    `return` statement (diff-rendering artifact); only one is kept.
    """
    return self.config.get('enable_torch_compile', True)
def enable_cpu_offload(self):
    """Return True when model CPU offload is enabled (on unless configured off)."""
    offload_enabled = self.config.get('enable_cpu_offload', True)
    return offload_enabled
def maximum_batch_size(self):
    """Return the configured maximum batch size, clamped to at least 1.

    Defaults to 4 when 'maximum_batch_size' is absent from the config.
    """
    configured = self.config.get('maximum_batch_size', 4)
    if configured < 1:
        # Guard against zero/negative config values: batch size must be >= 1.
        return 1
    return configured
6 changes: 1 addition & 5 deletions discord_tron_client/classes/image_manipulation/pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,11 +203,7 @@ def _run_pipeline(
):
original_stderr = sys.stderr
sys.stderr = self.tqdm_capture
batch_size = 4
if hardware.should_offload():
batch_size = 2
if hardware.should_sequential_offload():
batch_size = 1
batch_size = self.config.maximum_batch_size()
try:
alt_weight_algorithm = user_config.get("alt_weight_algorithm", False)
use_latent_result = user_config.get('latent_refiner', True)
Expand Down

0 comments on commit c733e0a

Please sign in to comment.