Added ability to log prompts and info to one file instead of several (Sygil-Dev#476)

* Added ability to log prompts and info to one file instead of several

This is my first pull request, so please be gentle. Instead of writing a separate info file for each image, this change lets you log the prompt info for all images to a single file. I also made sure the logged prompts still work when params are embedded in the prompt.

* Add files via upload

* Fixed image-to-image not working

* Add files via upload

* Revert "Add files via upload"

This reverts commit bb10b63.

* updated log

Co-authored-by: hlky <[email protected]>
2 people authored and jjisnow committed Sep 3, 2022
1 parent 85a94aa commit c7baacc
Showing 1 changed file with 66 additions and 31 deletions.
97 changes: 66 additions & 31 deletions webui.py
@@ -15,7 +15,7 @@
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) # i disagree with where you're putting it but since all guidefags are doing it this way, there you go
parser.add_argument("--gfpgan-gpu", type=int, help="run GFPGAN on specific gpu (overrides --gpu) ", default=0)
parser.add_argument("--gpu", type=int, help="choose which GPU to use if you have multiple", default=0)
parser.add_argument("--grid-format", type=str, help="png for lossless png files; jpg:quality for lossy jpeg; webp:quality for lossy webp, or webp:-compression for lossless webp", default="webp:60")
parser.add_argument("--grid-format", type=str, help="png for lossless png files; jpg:quality for lossy jpeg; webp:quality for lossy webp, or webp:-compression for lossless webp", default="jpg:95")
parser.add_argument("--ldsr-dir", type=str, help="LDSR directory", default=('./src/latent-diffusion' if os.path.exists('./src/latent-diffusion') else './LDSR'))
parser.add_argument("--n_rows", type=int, default=-1, help="rows in the grid; use -1 for autodetect and 0 for n_rows to be same as batch_size (default: -1)",)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats", default=False)
@@ -564,8 +564,8 @@ def check_prompt_length(prompt, comments):

comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")

def save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
def save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, write_sample_info_to_log_file, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
skip_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode, skip_metadata):
filename_i = os.path.join(sample_path_i, filename)
if not jpg_sample:
@@ -581,12 +581,12 @@ def save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, widt
if init_img is not None:
metadata.add_text("SD:denoising_strength", str(denoising_strength))
metadata.add_text("SD:GFPGAN", str(use_GFPGAN and GFPGAN is not None))
image.save(f"{filename_i}.webp", format='Webp', quality=grid_quality, method=6, exif=metadata)
image.save(f"{filename_i}.png", pnginfo=metadata)
else:
image.save(f"{filename_i}.webp", format='Webp', quality=grid_quality, method=6, )
image.save(f"{filename_i}.png")
else:
image.save(f"{filename_i}.webp", format='Webp', quality=grid_quality, method=6, )
if write_info_files:
image.save(f"{filename_i}.jpg", 'jpeg', quality=100, optimize=True)
if write_info_files or write_sample_info_to_log_file:
# toggles differ for txt2img vs. img2img:
offset = 0 if init_img is None else 2
toggles = []
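For context on the metadata.add_text / pnginfo calls in this hunk: Pillow's PngInfo object carries text chunks that are written into the PNG when passed as pnginfo=. A minimal, self-contained sketch; the file name and key values here are placeholders:

from PIL import Image
from PIL.PngImagePlugin import PngInfo

# Stand-in for a generated sample; attach "SD:*" text chunks like the hunk above.
img = Image.new("RGB", (64, 64))
metadata = PngInfo()
metadata.add_text("SD:prompt", "a placeholder prompt")
metadata.add_text("SD:seed", str(12345))
img.save("sample.png", pnginfo=metadata)   # chunks are embedded in the saved file

# They can be read back from the saved image:
print(Image.open("sample.png").text)       # {'SD:prompt': ..., 'SD:seed': '12345'}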
@@ -607,8 +607,11 @@ def save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, widt
toggles.append(4 + offset)
if write_info_files:
toggles.append(5 + offset)
if write_sample_info_to_log_file:
toggles.append(6+offset)
if use_GFPGAN:
toggles.append(6 + offset)
toggles.append(7 + offset)

info_dict = dict(
target="txt2img" if init_img is None else "img2img",
prompt=prompts[i], ddim_steps=steps, toggles=toggles, sampler_name=sampler_name,
@@ -621,8 +624,31 @@ def save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, widt
#info_dict["init_mask"] = init_mask
info_dict["denoising_strength"] = denoising_strength
info_dict["resize_mode"] = resize_mode
with open(f"{filename_i}.yaml", "w", encoding="utf8") as f:
yaml.dump(info_dict, f, allow_unicode=True, width=10000)
if write_info_files:
with open(f"{filename_i}.yaml", "w", encoding="utf8") as f:
yaml.dump(info_dict, f, allow_unicode=True, width=10000)

if write_sample_info_to_log_file:
ignore_list = ["prompt", "target", "toggles", "ddim_eta", "batch_size"]
rename_dict = {"ddim_steps": "steps", "n_iter": "number", "sampler_name": "sampler"} #changes the name of parameters to match with dynamic parameters
sample_log_path = os.path.join(sample_path_i, "log.yaml")
log_dump = info_dict.get("prompt") # making sure the first item that is listed in the txt is the prompt text
for key, value in info_dict.items():
if key in ignore_list:
continue
found_key = rename_dict.get(key)

if key == "cfg_scale": #adds zeros to to cfg_scale necessary for dynamic params
value = str(value).zfill(2)

if found_key:
key = found_key
log_dump += f" {key} {value}"

log_dump = log_dump + " \n" #space at the end for dynamic params to accept the last param
with open(sample_log_path, "a", encoding="utf8") as log_file:
log_file.write(log_dump)



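To illustrate what the new write_sample_info_to_log_file branch appends: one line per sample, starting with the prompt, with renamed keys and a zero-padded cfg_scale. The values below are made up; the snippet just replays the formatting logic from the hunk above:

info_dict = {
    "target": "txt2img", "prompt": "a placeholder prompt", "ddim_steps": 50,
    "toggles": [1, 2, 3], "sampler_name": "k_lms", "ddim_eta": 0.0,
    "n_iter": 1, "batch_size": 1, "cfg_scale": 8, "seed": 12345,
}
ignore_list = ["prompt", "target", "toggles", "ddim_eta", "batch_size"]
rename_dict = {"ddim_steps": "steps", "n_iter": "number", "sampler_name": "sampler"}

line = info_dict["prompt"]                     # the prompt always leads the line
for key, value in info_dict.items():
    if key in ignore_list:
        continue
    if key == "cfg_scale":
        value = str(value).zfill(2)            # single-digit scales become "08"
    line += f" {rename_dict.get(key, key)} {value}"

print(line + " ")
# a placeholder prompt steps 50 sampler k_lms number 1 cfg_scale 08 seed 12345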
def get_next_sequence_number(path, prefix=''):
@@ -702,7 +728,7 @@ def classToArrays( items, seed, n_iter ):
texts.append( item.text )
parts.append( f"Seed: {itemseed}\n" + "\n".join(item.parts) )
seeds.append( itemseed )
itemseed += 1
itemseed += 1

return seeds, texts, parts

@@ -727,7 +753,7 @@ def process_images(
n_iter, steps, cfg_scale, width, height, prompt_matrix, use_GFPGAN, use_RealESRGAN, realesrgan_model_name,
fp, ddim_eta=0.0, do_not_save_grid=False, normalize_prompt_weights=True, init_img=None, init_mask=None,
keep_mask=False, mask_blur_strength=3, denoising_strength=0.75, resize_mode=None, uses_loopback=False,
uses_random_seed_loopback=False, sort_samples=True, write_info_files=True, jpg_sample=False,
uses_random_seed_loopback=False, sort_samples=True, write_info_files=True, write_sample_info_to_log_file=False, jpg_sample=False,
variant_amount=0.0, variant_seed=None,imgProcessorTask=True):
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
assert prompt is not None
@@ -863,7 +889,7 @@ def process_images(
if opt.optimized:
modelFS.to(device)



x_samples_ddim = (model if not opt.optimized else modelFS).decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
@@ -894,7 +920,7 @@ def process_images(
gfpgan_image = Image.fromarray(gfpgan_sample)
gfpgan_filename = original_filename + '-gfpgan'
save_sample(gfpgan_image, sample_path_i, gfpgan_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
normalize_prompt_weights, use_GFPGAN, write_info_files, write_sample_info_to_log_file, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
skip_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode, False)
output_images.append(gfpgan_image) #287
#if simple_templating:
@@ -910,7 +936,7 @@ def process_images(
esrgan_sample = output[:,:,::-1]
esrgan_image = Image.fromarray(esrgan_sample)
save_sample(esrgan_image, sample_path_i, esrgan_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
normalize_prompt_weights, use_GFPGAN,write_info_files, write_sample_info_to_log_file, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
skip_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode, False)
#output_images.append(esrgan_image) #287
#if simple_templating:
@@ -928,7 +954,7 @@ def process_images(
gfpgan_esrgan_sample = output[:,:,::-1]
gfpgan_esrgan_image = Image.fromarray(gfpgan_esrgan_sample)
save_sample(gfpgan_esrgan_image, sample_path_i, gfpgan_esrgan_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
normalize_prompt_weights, use_GFPGAN, write_info_files, write_sample_info_to_log_file, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
skip_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode, False)
output_images.append(gfpgan_esrgan_image) #287
#if simple_templating:
@@ -939,7 +965,7 @@ def process_images(

if not skip_save:
save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
normalize_prompt_weights, use_GFPGAN, write_info_files, write_sample_info_to_log_file, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
skip_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode, False)
if add_original_image or not simple_templating:
output_images.append(image)
@@ -1020,9 +1046,10 @@ def txt2img(prompt: str, ddim_steps: int, sampler_name: str, toggles: List[int],
skip_grid = 3 not in toggles
sort_samples = 4 in toggles
write_info_files = 5 in toggles
jpg_sample = 6 in toggles
use_GFPGAN = 7 in toggles
use_RealESRGAN = 8 in toggles
write_to_one_file = 6 in toggles
jpg_sample = 7 in toggles
use_GFPGAN = 8 in toggles
use_RealESRGAN = 9 in toggles

if sampler_name == 'PLMS':
sampler = PLMSSampler(model)
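Because the new "write to one file" checkbox is inserted after "Write sample info files", every txt2img toggle index after it shifts up by one (jpg 6→7, GFPGAN 7→8, RealESRGAN 8→9); img2img shifts the same way further down. A small sketch of how the integer toggle list decodes into booleans under the new numbering; only the indices visible in this hunk are included, and the helper itself is illustrative:

# Illustrative decoder for the txt2img toggles list under the new numbering.
def decode_txt2img_toggles(toggles):
    return {
        "skip_grid":         3 not in toggles,  # inverted: checked means the grid is saved
        "sort_samples":      4 in toggles,
        "write_info_files":  5 in toggles,
        "write_to_one_file": 6 in toggles,      # checkbox added by this commit
        "jpg_sample":        7 in toggles,
        "use_GFPGAN":        8 in toggles,
        "use_RealESRGAN":    9 in toggles,
    }

# The txt2img defaults further down use toggles=[1, 2, 3]: the grid is saved and
# none of the info files, jpg output, or upscalers are enabled.
print(decode_txt2img_toggles([1, 2, 3]))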
@@ -1075,6 +1102,7 @@ def sample(init_data, x, conditioning, unconditional_conditioning, sampler_name)
normalize_prompt_weights=normalize_prompt_weights,
sort_samples=sort_samples,
write_info_files=write_info_files,
write_sample_info_to_log_file=write_to_one_file,
jpg_sample=jpg_sample,
variant_amount=variant_amount,
variant_seed=variant_seed,
@@ -1136,8 +1164,8 @@ def flag(self, flag_data, flag_option=None, flag_index=None, username=None):


def img2img(prompt: str, image_editor_mode: str, init_info, mask_mode: str, mask_blur_strength: int, ddim_steps: int, sampler_name: str,
toggles: List[int], realesrgan_model_name: str, n_iter: int, cfg_scale: float, denoising_strength: float,
seed: int, height: int, width: int, resize_mode: int, fp = None):
toggles: List[int], realesrgan_model_name: str, n_iter: int, cfg_scale: float, denoising_strength: float,
seed: int, height: int, width: int, resize_mode: int, fp=None):
outpath = opt.outdir_img2img or opt.outdir or "outputs/img2img-samples"
err = False
seed = seed_to_int(seed)
@@ -1152,9 +1180,10 @@ def img2img(prompt: str, image_editor_mode: str, init_info, mask_mode: str, mask
skip_grid = 5 not in toggles
sort_samples = 6 in toggles
write_info_files = 7 in toggles
jpg_sample = 8 in toggles
use_GFPGAN = 9 in toggles
use_RealESRGAN = 10 in toggles
write_sample_info_to_log_file = 8 in toggles
jpg_sample = 9 in toggles
use_GFPGAN = 10 in toggles
use_RealESRGAN = 11 in toggles

if sampler_name == 'DDIM':
sampler = DDIMSampler(model)
@@ -1178,6 +1207,7 @@ def img2img(prompt: str, image_editor_mode: str, init_info, mask_mode: str, mask
init_img = init_img.convert("RGB")
init_img = resize_image(resize_mode, init_img, width, height)
init_mask = init_info["mask"]
print(init_mask)
init_mask = resize_image(resize_mode, init_mask, width, height)
keep_mask = mask_mode == 0
init_mask = init_mask.convert("RGB")
@@ -1227,7 +1257,7 @@ def init():
init_image = init_image.to(device)
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
init_latent = (model if not opt.optimized else modelFS).get_first_stage_encoding((model if not opt.optimized else modelFS).encode_first_stage(init_image)) # move to latent space

if opt.optimized:
mem = torch.cuda.memory_allocated()/1e6
modelFS.to("cpu")
@@ -1318,6 +1348,7 @@ def sample(init_data, x, conditioning, unconditional_conditioning, sampler_name)
uses_random_seed_loopback=random_seed_loopback,
sort_samples=sort_samples,
write_info_files=write_info_files,
write_sample_info_to_log_file=write_sample_info_to_log_file,
jpg_sample=jpg_sample,
)

@@ -1373,6 +1404,7 @@ def sample(init_data, x, conditioning, unconditional_conditioning, sampler_name)
uses_loopback=loopback,
sort_samples=sort_samples,
write_info_files=write_info_files,
write_sample_info_to_log_file=write_sample_info_to_log_file,
jpg_sample=jpg_sample,
)

@@ -1656,6 +1688,7 @@ def make_mask_image(r):
uses_loopback=False,
sort_samples=True,
write_info_files=True,
write_sample_info_to_log_file=False,
jpg_sample=False,
imgProcessorTask=True
)
@@ -1853,6 +1886,7 @@ def run_RealESRGAN(image, model_name: str):
'Save grid',
'Sort samples by prompt',
'Write sample info files',
'Write sample info to log file',
'jpg samples',
]

@@ -1863,9 +1897,9 @@ def run_RealESRGAN(image, model_name: str):

txt2img_defaults = {
'prompt': '',
'ddim_steps': 25,
'ddim_steps': 50,
'toggles': [1, 2, 3],
'sampler_name': 'k_euler_a',
'sampler_name': 'k_lms',
'ddim_eta': 0.0,
'n_iter': 1,
'batch_size': 1,
@@ -1912,6 +1946,7 @@ def run_RealESRGAN(image, model_name: str):
'Save grid',
'Sort samples by prompt',
'Write sample info files',
'Write sample info to one file',
'jpg samples',
]
"""
Expand All @@ -1934,9 +1969,9 @@ def run_RealESRGAN(image, model_name: str):

img2img_defaults = {
'prompt': '',
'ddim_steps': 25,
'ddim_steps': 50,
'toggles': [1, 4, 5],
'sampler_name': 'k_euler_a',
'sampler_name': 'k_lms',
'ddim_eta': 0.0,
'n_iter': 1,
'batch_size': 1,
@@ -2040,7 +2075,7 @@ def run(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
gradio_params = {
'show_error': True,
'show_error': True,
'server_name': '0.0.0.0',
'server_port': opt.port,
'share': opt.share
