# Shared utilities: logging context, JSON helpers, run-folder management,
# usage/cost tracking, and blueprint settings normalization.
import os
|
|
import json
|
|
import datetime
|
|
import time
|
|
import config
|
|
import threading
|
|
|
|
# Every Gemini harm category is explicitly set to BLOCK_NONE for requests.
_SAFETY_CATEGORIES = (
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
)
SAFETY_SETTINGS = [
    {"category": category, "threshold": "BLOCK_NONE"}
    for category in _SAFETY_CATEGORIES
]
|
|
|
|
# Thread-local storage for logging context: log() reads an optional
# .log_file path and an optional .callback from here, so each worker
# thread can route its log output independently.
_log_context = threading.local()
|
|
|
|
def set_log_file(filepath):
    """Route this thread's log() output to *filepath* (appended, UTF-8)."""
    _log_context.log_file = filepath
|
|
|
|
def set_log_callback(callback):
    """Register a callable(phase, msg) that log() invokes on this thread."""
    _log_context.callback = callback
|
|
|
|
def clean_json(text):
    """Strip markdown code fences and extract the outermost JSON value.

    Returns the substring from the first '{' or '[' (whichever appears
    first) through the last matching '}' or ']'.  If no JSON delimiter is
    present, returns the cleaned text unchanged.  If the closing delimiter
    is missing (e.g. truncated model output), returns everything from the
    opening delimiter onward instead of silently yielding "".
    """
    text = text.replace("```json", "").replace("```", "").strip()
    # Robust extraction: find first { or [ and last } or ]
    start_obj = text.find('{')
    start_arr = text.find('[')
    if start_obj == -1 and start_arr == -1:
        return text
    if start_obj != -1 and (start_arr == -1 or start_obj < start_arr):
        start, end = start_obj, text.rfind('}')
    else:
        start, end = start_arr, text.rfind(']')
    # Fix: rfind() returns -1 when the closer is absent; the previous slice
    # text[start:-1+1] evaluated to an empty string and dropped the payload.
    return text[start:end + 1] if end != -1 else text[start:]
|
|
|
|
# --- SHARED UTILS ---
def log(phase, msg):
    """Print a timestamped log line and mirror it to this thread's
    optional log file and callback (see set_log_file / set_log_callback).

    The callback is best-effort: its failures are suppressed so logging
    can never take down the caller.
    """
    timestamp = datetime.datetime.now().strftime('%H:%M:%S')
    line = f"[{timestamp}] {phase:<15} | {msg}"
    print(line)

    # Write to thread-specific log file if set
    log_file = getattr(_log_context, 'log_file', None)
    if log_file:
        with open(log_file, "a", encoding="utf-8") as f:
            f.write(line + "\n")

    # Trigger callback if set (e.g. for Database logging)
    callback = getattr(_log_context, 'callback', None)
    if callback:
        try:
            callback(phase, msg)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt.
            pass
|
|
|
|
def load_json(path):
    """Load and return the JSON document at *path*, or None if it is absent.

    Fix: the original `json.load(open(path))` never closed the file handle;
    use a context manager and explicit UTF-8 encoding.
    """
    if not os.path.exists(path):
        return None
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)
|
|
|
|
def create_default_personas():
    """Ensure the personas directory exists and seed an empty personas file.

    Best-effort: write failures (e.g. read-only install) are ignored so
    startup never crashes.  Fix: exists-then-makedirs race replaced with
    exist_ok=True, and the bare `except:` narrowed to OSError.
    """
    # Initialize empty personas file if it doesn't exist
    os.makedirs(config.PERSONAS_DIR, exist_ok=True)
    if not os.path.exists(config.PERSONAS_FILE):
        try:
            with open(config.PERSONAS_FILE, 'w', encoding='utf-8') as f:
                json.dump({}, f, indent=2)
        except OSError:
            pass
|
|
|
|
def get_length_presets():
    """Returns a dict mapping Label -> Settings for use in main.py"""
    # Key on each definition's human-readable 'label'; the dict keys of
    # LENGTH_DEFINITIONS themselves are not used.
    return {v['label']: v for v in config.LENGTH_DEFINITIONS.values()}
|
|
|
|
def log_image_attempt(folder, img_type, prompt, filename, status, error=None, score=None, critique=None):
    """Append one image-generation attempt record to <folder>/image_log.json.

    A corrupted or unreadable existing log is discarded and restarted.
    Fix: bare `except:` narrowed; the loaded document is now verified to be
    a list (a dict-shaped file previously crashed on .append), and the read
    uses a context manager.
    """
    log_path = os.path.join(folder, "image_log.json")
    entry = {
        "timestamp": int(time.time()),
        "type": img_type,
        "prompt": prompt,
        "filename": filename,
        "status": status,
        "error": str(error) if error else None,
        "score": score,
        "critique": critique
    }
    data = []
    if os.path.exists(log_path):
        try:
            with open(log_path, 'r', encoding='utf-8') as f:
                loaded = json.load(f)
            if isinstance(loaded, list):
                data = loaded
        except (OSError, json.JSONDecodeError):
            pass  # corrupted log: start a fresh list rather than crash
    data.append(entry)
    with open(log_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, indent=2)
|
|
|
|
def get_run_folder(base_name):
    """Create and return the next sequential run folder: <base_name>/run_N.

    N is one greater than the highest existing numeric run_* suffix
    (starting at run_1).  Non-numeric suffixes are ignored.
    Fix: exists-then-makedirs race replaced with exist_ok=True.
    """
    os.makedirs(base_name, exist_ok=True)
    numbers = [0]
    for entry in os.listdir(base_name):
        if entry.startswith("run_"):
            suffix = entry.split("_")[1]
            if suffix.isdigit():
                numbers.append(int(suffix))
    folder = os.path.join(base_name, f"run_{max(numbers) + 1}")
    os.makedirs(folder)
    return folder
|
|
|
|
def get_latest_run_folder(base_name):
    """Return the path of the highest-numbered run_* subfolder of
    *base_name*, or None if the base folder or any run folders are missing.

    Folders whose suffix is not numeric sort as 0.
    """
    if not os.path.exists(base_name):
        return None
    candidates = [entry for entry in os.listdir(base_name) if entry.startswith("run_")]
    if not candidates:
        return None

    def run_number(name):
        suffix = name.split('_')[1]
        return int(suffix) if suffix.isdigit() else 0

    candidates.sort(key=run_number)
    return os.path.join(base_name, candidates[-1])
|
|
|
|
def log_usage(folder, model_label, usage_metadata=None, image_count=0):
    """Append a usage entry to <folder>/usage_log.json and refresh totals.

    Records token counts (taken from the API response's usage_metadata
    when provided) plus an image count, then recomputes aggregate totals
    and an estimated USD cost from hard-coded per-model prices.
    No-ops when *folder* is falsy or missing.

    Fixes: `json.load(open(...))` leaked a file handle; two bare `except:`
    clauses narrowed to the specific expected failures.
    """
    if not folder or not os.path.exists(folder):
        return

    log_path = os.path.join(folder, "usage_log.json")

    entry = {
        "timestamp": int(time.time()),
        "model": model_label,
        "input_tokens": 0,
        "output_tokens": 0,
        "images": image_count
    }

    if usage_metadata:
        try:
            entry["input_tokens"] = usage_metadata.prompt_token_count
            entry["output_tokens"] = usage_metadata.candidates_token_count
        except AttributeError:
            pass  # metadata object lacks the expected fields — keep zeros

    data = {"log": [], "totals": {"input_tokens": 0, "output_tokens": 0, "images": 0, "est_cost_usd": 0.0}}

    if os.path.exists(log_path):
        try:
            with open(log_path, 'r', encoding='utf-8') as f:
                loaded = json.load(f)
            # Legacy format stored a bare list of entries.
            if isinstance(loaded, list):
                data["log"] = loaded
            else:
                data = loaded
        except (OSError, json.JSONDecodeError):
            pass  # corrupted log: start fresh rather than crash

    data["log"].append(entry)

    # Recalculate totals from the full log so they stay self-consistent.
    t_in = sum(x.get('input_tokens', 0) for x in data["log"])
    t_out = sum(x.get('output_tokens', 0) for x in data["log"])
    t_img = sum(x.get('images', 0) for x in data["log"])

    cost = 0.0
    for x in data["log"]:
        m = x.get('model', '').lower()
        i = x.get('input_tokens', 0)
        o = x.get('output_tokens', 0)
        imgs = x.get('images', 0)

        # NOTE(review): image cost is only added when the model name matches
        # neither 'flash' nor 'pro'/'logic' — confirm this is intentional.
        if 'flash' in m:
            cost += (i / 1_000_000 * 0.075) + (o / 1_000_000 * 0.30)
        elif 'pro' in m or 'logic' in m:
            cost += (i / 1_000_000 * 3.50) + (o / 1_000_000 * 10.50)
        elif 'imagen' in m or imgs > 0:
            cost += (imgs * 0.04)

    data["totals"] = {
        "input_tokens": t_in,
        "output_tokens": t_out,
        "images": t_img,
        "est_cost_usd": round(cost, 4)
    }

    with open(log_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, indent=2)
|
|
|
|
def normalize_settings(bp):
    """
    CRITICAL: Enforces defaults on a blueprint dict, in place.

    1. If series_metadata is missing, force it to SINGLE mode.
    2. If length_settings is missing values, force explicit numbers taken
       from the preset matching its label (Novella when unknown).
    Returns the same dict for convenience.
    """
    # Force Series Default (1 Book)
    bp.setdefault('series_metadata', {
        "is_series": False,
        "mode": "single",
        "series_title": "Standalone",
        "total_books_to_generate": 1
    })

    # Check for empty series count just in case
    series = bp['series_metadata']
    if series.get('total_books_to_generate') is None:
        series['total_books_to_generate'] = 1

    # Force Length Defaults: look up the preset for this label, falling
    # back to Novella when the label is missing or unknown.
    settings = bp.get('length_settings', {})
    presets = get_length_presets()
    defaults = presets.get(settings.get('label', 'Novella'), presets['Novella'])

    settings.setdefault('chapters', defaults['chapters'])
    settings.setdefault('words', defaults['words'])

    # Smart Depth Calculation (if not manually set): derive outline depth
    # from chapter count via ascending thresholds.
    if 'depth' not in settings:
        chapter_count = int(settings['chapters'])
        thresholds = ((5, 1), (20, 2), (40, 3))
        settings['depth'] = next(
            (depth for limit, depth in thresholds if chapter_count <= limit),
            4,
        )

    bp['length_settings'] = settings
    return bp
return bp |