#!/usr/bin/env python
# coding: utf-8

import os
import openai
import gradio as gr

import torch
from diffusers import StableDiffusionPipeline
from torch import autocast

from contextlib import nullcontext



# Credentials come from the environment: `openaikey` is the OpenAI API key,
# `authtoken` is the Hugging Face Hub token used to pull the model weights.
openai.api_key = os.getenv('openaikey')
authtoken = os.getenv('authtoken')

device = "cuda" if torch.cuda.is_available() else "cpu"
context = autocast if device == "cuda" else nullcontext
dtype = torch.float16 if device == "cuda" else torch.float32
pipe = StableDiffusionPipeline.from_pretrained("stale2000/sd-dnditem", torch_dtype=dtype, use_auth_token=authtoken)  
pipe = pipe.to(device)

disable_safety = True

if disable_safety:
    # Swap in a no-op safety checker so generated images are never blacked
    # out; the second return value stands in for the "has NSFW concept" flag.
    def null_safety(images, **kwargs):
        return images, False

    pipe.safety_checker = null_safety
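# Note: newer diffusers releases expect that second value to be a list of
# per-image flags (e.g. `[False] * len(images)`); the bare False is assumed
# fine for the older version this script was written against.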

def create_files():
    # One-off dataset-prep helper: prints the caption field of every .txt
    # file in the local capture directory. Not called at runtime.
    directory = 'C:\\Users\\brcwa\\OneDrive\\Desktop\\destinyCaptures\\dnd\\fullcaptionsimple\\full'
    for filename in os.listdir(directory):
        if not filename.endswith('txt'):
            continue
        path = os.path.join(directory, filename)
        # checking if it is a file
        if os.path.isfile(path):
            with open(path, "r") as text_file:
                lines = text_file.read()
            print(lines.split(',')[1] + "," + lines.split(',')[1])

#create_files()

def createGPTPrompt(item_type, description):
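    # Hypothetical example: ("a pair of boots", "speed") -> "boots of speed".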
    return item_type.split(",")[0].split(" ")[-1] + " of " + description

def convert_lines(lines):
    # Parse "name,prompt fragment" pairs into an ordered list of names plus a
    # name -> fragment lookup table.
    key_arr = []
    key_hash = {}
    for line in lines:
        if "," not in line:  # skip blank/trailing lines
            continue
        key, val = line.split(",", 1)
        key_arr.append(key)
        key_hash[key] = val

    return key_arr, key_hash
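# Hypothetical example: convert_lines(["ultimate evil,a dark swirling aura"])
# returns (["ultimate evil"], {"ultimate evil": "a dark swirling aura"}).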

def predict(dropdown, style_dropdown, manual_gpt_replacement, manual_sd_prompt, n_samples, history=None):
    # Gradio's 'state' input arrives as None on the first call.
    if history is None:
        history = []

    # Build the GPT-3 prompt, preferring a manual override if one was typed.
    description = style_dropdown
    if manual_gpt_replacement != '':
        gpt_input = manual_gpt_replacement
    else:
        gpt_input = "Describe the mechanics of a 5th Edition DnD item called '" + createGPTPrompt(dropdown, description) + "' :"

    # Build the Stable Diffusion prompt the same way.
    if manual_sd_prompt != '':
        sd_input = manual_sd_prompt
    else:
        sd_input = "dnditem, " + dropdown + ", " + style_hashmap[style_dropdown] + ", circle inner background and white outerbackground"

    
    # Ask GPT-3 for the item's rules text.
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=gpt_input,
        temperature=0.9,
        max_tokens=200,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6)

    # Log the image prompt alongside the generated rules text for the chatbot.
    responseText = response["choices"][0]["text"]
    history.append((sd_input, responseText))


    # Image generation: render one image per requested sample.
    scale = 5.5

    # `context` is autocast on CUDA and nullcontext on CPU (nullcontext just
    # ignores the "cuda" argument); int() guards against the slider handing
    # back a float.
    with context("cuda"):
        images = pipe(int(n_samples) * [sd_input], guidance_scale=scale, num_inference_steps=40).images

    # Maps onto the Interface outputs below: chatbot log, state, gallery.
    return history, history, images



manual_gpt_query = gr.Textbox(placeholder="Input any query here, to replace the GPT query builder entirely.")
manual_sd_prompt = gr.Textbox(placeholder="Input any prompt here, to replace the Stable Diffusion prompt builder entirely.")



choiceArr = ["a pair of boots", "a cloak", "a pair of gloves", "a helmet", "a necklace", "a ring", "a robe", "a rod", "a shield", "a staff", "a sword", "a wand"]
dropdown = gr.Dropdown(label="Item Type", choices=choiceArr, value="a pair of boots")



# styles.txt holds one "style name,prompt fragment" pair per line; it feeds
# both the style dropdown and the prompt lookup table.
with open("styles.txt", "r") as text_file:
    lines = text_file.read().split('\n')
dropdown_arr, style_hashmap = convert_lines(lines)


style_dropdown = gr.Dropdown(label="Item Ability and Style", choices=dropdown_arr, value="ultimate evil")

output_img = gr.Gallery(label="Generated image")
output_img.style(grid=2)

slide = gr.Slider(label="Number of Images Generated", minimum=1, maximum=4, value=2, step=1)

gr.Interface(fn=predict,
             inputs=[dropdown, style_dropdown, manual_gpt_query, manual_sd_prompt, slide, 'state'],
             outputs=["chatbot", 'state', output_img]).launch()
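# Assumed local usage: run this script directly with the two environment
# variables above set and styles.txt in the working directory; Gradio then
# serves the demo on its default local port.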