Commit

added streamlit

add
WillReynolds5 committed Sep 16, 2024
1 parent b7d2b4f commit eb31b17
Showing 44 changed files with 2,876 additions and 311 deletions.
552 changes: 322 additions & 230 deletions comfyui.log

Large diffs are not rendered by default.

670 changes: 670 additions & 0 deletions controlnet.ipynb

Large diffs are not rendered by default.

46 changes: 46 additions & 0 deletions dataset_generator/gen_character.py
@@ -0,0 +1,46 @@
import json
from urllib import request, parse
import random


# This is the ComfyUI api prompt format.

# If you want it for a specific workflow you can "enable dev mode options"
# in the settings of the UI (gear beside the "Queue Size: ") this will enable
# a button on the UI to save workflows in api format.

# keep in mind ComfyUI is pre alpha software so this format will change a bit.

# this is the one for the default workflow
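# For reference, a workflow saved in API format is a JSON object keyed by node id,
# where each node carries a "class_type" and an "inputs" dict. The node ids and
# fields below are only illustrative (they depend on the exported workflow):
#
# {
#   "7":  {"class_type": "CLIPTextEncode", "inputs": {"text": "positive prompt", "clip": ["4", 1]}},
#   "8":  {"class_type": "CLIPTextEncode", "inputs": {"text": "negative prompt", "clip": ["4", 1]}},
#   "17": {"class_type": "KSampler", "inputs": {"seed": 123, "steps": 20, "cfg": 7}}
# }
#
# gen_prompt() below loads reposer.json in this format and patches the text and seed fields.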

def gen_prompt(pos_prompt, neg_prompt, seed=None):
    if seed is None:
        seed = random.randint(0, 1000000)

    # open the workflow json file called reposer.json
    with open("reposer.json", "r") as f:
        prompt_text = f.read()
    # load json and patch in the prompts and seed
    parsed_prompt = json.loads(prompt_text)
    parsed_prompt["7"]["inputs"]["text"] = pos_prompt
    parsed_prompt["8"]["inputs"]["text"] = neg_prompt
    parsed_prompt["17"]["inputs"]["seed"] = seed

    return parsed_prompt


def queue_prompt(prompt):
    # POST the workflow dict to the local ComfyUI server's /prompt endpoint
    p = {"prompt": prompt}
    data = json.dumps(p).encode('utf-8')
    req = request.Request("http://127.0.0.1:8188/prompt", data=data)
    resp = request.urlopen(req)
    print(resp.read())
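# Sketch (assumption, not part of this file as committed): ComfyUI's /prompt endpoint
# responds with JSON containing a "prompt_id", and finished outputs can be read back
# from /history/<prompt_id>. A helper like the one below could recover the generated
# image filenames; queue_prompt() above only prints the raw response, so it would
# need to return the parsed "prompt_id" before this could be wired in.
def get_output_filenames(prompt_id):
    # query the history endpoint for the outputs of a completed prompt
    with request.urlopen(f"http://127.0.0.1:8188/history/{prompt_id}") as resp:
        history = json.loads(resp.read())
    filenames = []
    for node_output in history.get(prompt_id, {}).get("outputs", {}).values():
        for image in node_output.get("images", []):
            filenames.append(image["filename"])
    return filenames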


positive_prompt = "(hyper realistic animation), women standing on golf course in southern utah with bikini"
negative_prompt = "deformed face, disfigured eyes, deformed breasts"

prompt = gen_prompt(positive_prompt, negative_prompt)

queue_prompt(prompt)
99 changes: 99 additions & 0 deletions dataset_generator/gen_images_cascade.py
@@ -0,0 +1,99 @@
import json
import uuid
import time
import os

from gen_character import gen_prompt
from llm import parse_json, inference

previous_prompts = []


def save_prompt_to_json(path, prompt_data):
"""Save prompt data to a JSON file."""
path = os.path.join('output', path, 'prompts_data.json')
try:
# Load existing data
with open(path, 'r') as file:
data = json.load(file)
except FileNotFoundError:
data = []

# Append the new data
data.append(prompt_data)

# Write the updated data back to the file
with open(path, 'w') as file:
json.dump(data, file, indent=4)
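# The resulting output/<image_id>/prompts_data.json is a list of entries shaped like
# the prompt_data dicts built in generate_portraits() below (illustrative):
# [
#   {
#     "id": "<image_id>",
#     "prompt": {"positive": "...", "negative": "..."}
#   }
# ]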


def generate_portraits():
global previous_prompts

for _ in range(10000):
try:
base_prompt = """
You are a “prompt” generating assistant. Your job is to generate image prompts for AI generated images of a female character for my NSFW roleplay chatbot. Each image set will be 10 different shots of one character in different sexual poses. Each set will take place in the same setting.
Prompts are lists of phrases, separated by commas. Each phrase in a prompt describes a different aspect of the character / scene being generated.
Parentheses can be used to give extra strength to a phrase in a prompt.
Each prompt has a “positive” and “negative”. The generated images will correlate with the positive prompt and not correlate with the negative.
Here is an example prompt:
[{
“positive”: “(hyper realistic animation), blonde girl with D sized breasts, background hotel room, seductive expression, nude, sitting on bed with legs spread”,
“negative”: “deformed face, disfigured eyes, deformed breasts”
},
{
“positive”: “(hyper realistic animation), blonde girl with D sized breasts, background hotel room, seductive expression, nude, squeezing breasts together.”,
“negative”: “deformed face, disfigured eyes, deformed breasts”
},
...]
Generate a new prompt that is very different from the following previous prompts:"""

# Dynamically add the previous prompts to the base prompt for each iteration
if previous_prompts:
for prompt in previous_prompts:
base_prompt += f"\n\n{prompt}"

base_prompt += "\nThe new prompt should contain the same setting and character with varied pose, expression, ect. Output only one JSON object with the keys positive and negative."

messages = [
{"role": "user", "content": base_prompt}
]

response = inference(messages)
parsed_response, success = parse_json(response)
image_id = str(uuid.uuid4().hex)
# make dir output/image_id
os.makedirs(os.path.join('output', image_id))

            if success:
                print(response)
                # Update the previous prompts list with the new prompt
                previous_prompts.append(response)
                # Keep only the last ten prompts as context for the next generation
                previous_prompts = previous_prompts[-10:]
                # comfyui_inference_basic is assumed to be provided by another module
                # in this project; it is not defined or imported in this file.
                comfyui_inference_basic(positive_prompt=parsed_response["positive"],
                                        negative_prompt=parsed_response["negative"],
                                        id=image_id)

# TODO: get the filename
# Save the prompt along with the filename and prompt details to a JSON file
prompt_data = {
"id": image_id,
"prompt": {
"positive": parsed_response["positive"],
"negative": parsed_response["negative"]
}
}
# save_prompt_to_json(image_id, prompt_data)

time.sleep(0.5)

except Exception as e:
print(e)


if __name__ == "__main__":
generate_portraits()
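# Sketch (assumption): the llm module imported above is not included in this diff.
# A minimal parse_json compatible with how it is called here -- returning
# (parsed_dict, success) for the JSON object found in the model's reply -- might
# look like this; inference() is assumed to wrap whatever chat-completion backend
# the project uses and to return the reply text for a list of messages.
import json
import re

def parse_json(text):
    """Return (parsed, True) for the span from the first '{' to the last '}', else (None, False)."""
    match = re.search(r"\{.*\}", text, re.DOTALL)
    if match is None:
        return None, False
    try:
        return json.loads(match.group(0)), True
    except json.JSONDecodeError:
        return None, False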
0 comments on commit eb31b17