-
Notifications
You must be signed in to change notification settings - Fork 19
/
process_params.py
103 lines (95 loc) · 3.36 KB
/
process_params.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional

from extensions.sd_smartprocess.file_manager import ImageData
@dataclass
class ProcessParams:
    """Configuration bundle for the smart-process image pipeline.

    Groups every user-tunable knob (captioning, cropping, upscaling,
    face restoration, file handling) into one flat dataclass so UI code
    can build it from a settings dict via :meth:`from_dict`.
    """

    auto_save: bool = False
    # Prompt prepended to BLIP captioning requests.
    blip_initial_prompt: str = "a caption for this image is: "
    booru_min_score: float = 0.75
    caption: bool = False
    # Captioner name -> enabled flag.
    # BUGFIX: the original used default_factory=lambda: [] (a *list*)
    # despite the Dict annotation; dict matches the annotation and callers.
    captioners: Dict[str, bool] = field(default_factory=dict)
    char_threshold: float = 0.5
    clip_append_artist: bool = False
    clip_append_flavor: bool = False
    clip_append_medium: bool = False
    clip_append_movement: bool = False
    clip_append_trending: bool = False
    clip_max_flavors: int = 3
    clip_use_v2: bool = False
    crop: bool = False
    crop_mode: str = "smart"
    do_backup: bool = False
    do_rename: bool = False
    dst: str = ""
    face_model: str = "Codeformers"
    flip: bool = False
    idefics2_prompt: str = "Describe this image in one detailed sentence, include the subject, location, style, and type of image."
    interrogation_prompt: str = "Describe this image in one detailed sentence."
    insert_subject: bool = False
    load_in_4bit: bool = False
    load_in_8bit: bool = False
    max_clip_tokens: float = 1.0
    max_size: int = 1024
    max_tokens: int = 75
    min_clip_tokens: float = 0.0
    new_caption: str = ""
    # BUGFIX: same list-vs-Dict factory mismatch as `captioners`.
    nl_captioners: Dict[str, bool] = field(default_factory=dict)
    num_beams: int = 5
    pad: bool = False
    replace_blip_caption: bool = True
    replace_class: bool = False
    restore_faces: bool = False
    save_caption: bool = False
    save_image: bool = False
    # Quoted annotation: dataclasses never evaluate it, so importing this
    # module does not require ImageData to resolve at class-creation time.
    src_files: List["ImageData"] = field(default_factory=list)
    subject: str = ""
    subject_class: str = ""
    tags_to_ignore: List[str] = field(default_factory=list)
    threshold: float = 0.5
    txt_action: str = "ignore"
    upscale: bool = False
    upscale_max: int = 4096
    upscale_mode: str = "ratio"
    upscale_ratio: float = 2.0
    # BUGFIX: these four had no annotation, so @dataclass ignored them and
    # they were *shared class attributes*; annotating makes them true
    # per-instance fields (backward-compatible — all keep their defaults).
    upscaler_1: Any = None  # presumably an upscaler model object — TODO confirm
    upscaler_2: Any = None
    wd14_min_score: float = 0.75
    image_path: Optional[str] = None  # NOTE(review): assumed to be a path string

    def clip_params(self) -> Dict[str, Any]:
        """Return the subset of settings consumed by the CLIP/BLIP interrogator."""
        return {
            "min_clip_tokens": self.min_clip_tokens,
            "max_clip_tokens": self.max_clip_tokens,
            "use_v2": self.clip_use_v2,
            "append_flavor": self.clip_append_flavor,
            "max_flavors": self.clip_max_flavors,
            "append_medium": self.clip_append_medium,
            "append_movement": self.clip_append_movement,
            "append_artist": self.clip_append_artist,
            "append_trending": self.clip_append_trending,
            "num_beams": self.num_beams,
            "clip_max_flavors": self.clip_max_flavors,
            "blip_initial_prompt": self.blip_initial_prompt,
        }

    def pre_only(self) -> None:
        """Restrict this run to pre-processing: disable caption/upscale/face-restore."""
        self.caption = False
        self.upscale = False
        self.restore_faces = False

    def cap_only(self) -> None:
        """Restrict this run to captioning: disable upscale/face-restore/crop/pad."""
        self.upscale = False
        self.restore_faces = False
        self.crop = False
        self.pad = False

    def post_only(self) -> None:
        """Restrict this run to post-processing: disable caption/crop/pad."""
        self.caption = False
        self.crop = False
        self.pad = False

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> "ProcessParams":
        """Build a fresh ProcessParams from a settings dict.

        Keys are matched after stripping a ``sp_`` prefix; ``class`` maps to
        ``subject_class``. Unknown keys are silently ignored.
        (The original comment claimed a singleton — ``cls()`` actually
        returns a new instance on every call.)
        """
        instance = cls()
        for k, v in d.items():
            k = k.replace("sp_", "")  # strip the UI's "sp_" setting prefix
            if k == "class":
                k = "subject_class"  # "class" is a reserved word upstream
            if hasattr(instance, k):
                setattr(instance, k, v)
        return instance