Final changes and update
This commit is contained in:
@@ -1,5 +1,4 @@
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import warnings
|
||||
@@ -77,7 +76,7 @@ def select_best_models(force_refresh=False):
|
||||
|
||||
try:
|
||||
utils.log("SYSTEM", "Refreshing AI model list from API...")
|
||||
models = [m.name for m in genai.list_models() if 'generateContent' in m.supported_generation_methods]
|
||||
models = [m.name for m in genai.list_models() if 'generateContent' in m.supported_generation_methods and 'gemini' in m.name.lower()]
|
||||
|
||||
bootstrapper = "models/gemini-1.5-flash"
|
||||
if bootstrapper not in models:
|
||||
@@ -88,8 +87,12 @@ def select_best_models(force_refresh=False):
|
||||
model = genai.GenerativeModel(bootstrapper)
|
||||
prompt = f"Analyze this list of available Google Gemini models:\n{json.dumps(models)}\n\nSelect the best model for each of these three roles based on these criteria:\n- Most recent version with best features and ability.\n- Beta versions are okay, but avoid 'experimental' if a stable beta/prod version exists.\n- Consider quota efficiency (Flash is cheaper/faster, Pro is smarter).\n\nROLES:\n1. LOGIC: For complex reasoning, JSON structuring, and plot planning.\n2. WRITER: For creative fiction writing, prose generation, and speed.\n3. ARTIST: For generating visual art prompts and design instructions.\n\nAlso provide a 'ranking' list of ALL models analyzed, ordered from best/most useful to worst/least useful, with a short reason.\n\nReturn JSON: {{ 'logic': {{ 'model': 'model_name', 'reason': 'reasoning' }}, 'writer': {{ 'model': 'model_name', 'reason': 'reasoning' }}, 'artist': {{ 'model': 'model_name', 'reason': 'reasoning' }}, 'ranking': [ {{ 'model': 'model_name', 'reason': 'reasoning' }} ] }}"
|
||||
|
||||
response = model.generate_content(prompt)
|
||||
selection = json.loads(utils.clean_json(response.text))
|
||||
try:
|
||||
response = model.generate_content(prompt)
|
||||
selection = json.loads(utils.clean_json(response.text))
|
||||
except Exception as e:
|
||||
utils.log("SYSTEM", f"Model selection generation failed (Safety/Format): {e}")
|
||||
raise e
|
||||
|
||||
if not os.path.exists(config.DATA_DIR): os.makedirs(config.DATA_DIR)
|
||||
with open(cache_path, 'w') as f:
|
||||
|
||||
@@ -18,7 +18,7 @@ def compile_files(bp, ms, folder):
|
||||
if meta.get('filename'):
|
||||
safe = meta['filename']
|
||||
else:
|
||||
safe = "".join([c for c in title if c.isalnum() or c=='_']).replace(" ", "_")
|
||||
safe = utils.sanitize_filename(title)
|
||||
|
||||
doc = Document(); doc.add_heading(title, 0)
|
||||
book = epub.EpubBook(); book.set_title(title); spine = ['nav']
|
||||
@@ -29,6 +29,9 @@ def compile_files(bp, ms, folder):
|
||||
with open(cover_path, 'rb') as f:
|
||||
book.set_cover("cover.png", f.read())
|
||||
|
||||
# Ensure manuscript is sorted correctly before compiling
|
||||
ms.sort(key=utils.chapter_sort_key)
|
||||
|
||||
for c in ms:
|
||||
# Determine filename/type
|
||||
num_str = str(c['num']).lower()
|
||||
|
||||
@@ -2,11 +2,13 @@ import os
|
||||
import json
|
||||
import shutil
|
||||
import textwrap
|
||||
import subprocess
|
||||
import requests
|
||||
import google.generativeai as genai
|
||||
from . import utils
|
||||
import config
|
||||
from modules import ai
|
||||
from rich.prompt import Confirm
|
||||
|
||||
try:
|
||||
from PIL import Image, ImageDraw, ImageFont, ImageStat
|
||||
@@ -99,7 +101,7 @@ def generate_blurb(bp, folder):
|
||||
except:
|
||||
utils.log("MARKETING", "Failed to generate blurb.")
|
||||
|
||||
def generate_cover(bp, folder, tracking=None, feedback=None):
|
||||
def generate_cover(bp, folder, tracking=None, feedback=None, interactive=False):
|
||||
if not HAS_PIL:
|
||||
utils.log("MARKETING", "Pillow not installed. Skipping image cover.")
|
||||
return
|
||||
@@ -125,6 +127,10 @@ def generate_cover(bp, folder, tracking=None, feedback=None):
|
||||
regenerate_image = True
|
||||
design_instruction = ""
|
||||
|
||||
# If existing art exists and no feedback provided, preserve it (Keep Cover feature)
|
||||
if os.path.exists(os.path.join(folder, "cover_art.png")) and not feedback:
|
||||
regenerate_image = False
|
||||
|
||||
if feedback and feedback.strip():
|
||||
utils.log("MARKETING", f"Analyzing feedback: '{feedback}'...")
|
||||
analysis_prompt = f"""
|
||||
@@ -150,7 +156,7 @@ def generate_cover(bp, folder, tracking=None, feedback=None):
|
||||
Act as an Art Director. Design the cover for this book.
|
||||
TITLE: {meta.get('title')}
|
||||
GENRE: {meta.get('genre')}
|
||||
TONE: {meta.get('style', {}).get('tone')}
|
||||
TONE: {meta.get('style', {}).get('tone', 'Balanced')}
|
||||
|
||||
CRITICAL INSTRUCTIONS:
|
||||
1. CHARACTER APPEARANCE: Strictly adhere to the provided character descriptions (hair, eyes, race, age, clothing) in the Visual Context.
|
||||
@@ -214,6 +220,21 @@ def generate_cover(bp, folder, tracking=None, feedback=None):
|
||||
utils.log("MARKETING", f" -> Image Score: {score}/10. Critique: {critique}")
|
||||
utils.log_image_attempt(folder, "cover", art_prompt, f"cover_art_{i}.png", status, score=score, critique=critique)
|
||||
|
||||
if interactive:
|
||||
# Open image for review
|
||||
try:
|
||||
if os.name == 'nt': os.startfile(attempt_path)
|
||||
elif sys.platform == 'darwin': subprocess.call(('open', attempt_path))
|
||||
else: subprocess.call(('xdg-open', attempt_path))
|
||||
except: pass
|
||||
|
||||
if Confirm.ask(f"Accept cover attempt {i} (Score: {score})?", default=True):
|
||||
best_img_path = attempt_path
|
||||
break
|
||||
else:
|
||||
utils.log("MARKETING", "User rejected cover. Retrying...")
|
||||
continue
|
||||
|
||||
if score > best_img_score:
|
||||
best_img_score = score
|
||||
best_img_path = attempt_path
|
||||
@@ -345,6 +366,6 @@ def generate_cover(bp, folder, tracking=None, feedback=None):
|
||||
except Exception as e:
|
||||
utils.log("MARKETING", f"Cover generation failed: {e}")
|
||||
|
||||
def create_marketing_assets(bp, folder, tracking=None):
|
||||
def create_marketing_assets(bp, folder, tracking=None, interactive=False):
|
||||
generate_blurb(bp, folder)
|
||||
generate_cover(bp, folder, tracking)
|
||||
generate_cover(bp, folder, tracking, interactive=interactive)
|
||||
295
modules/story.py
295
modules/story.py
@@ -153,20 +153,6 @@ def enrich(bp, folder, context=""):
|
||||
def plan_structure(bp, folder):
|
||||
utils.log("ARCHITECT", "Creating structure...")
|
||||
|
||||
if 'plot_outline' in bp and isinstance(bp['plot_outline'], dict):
|
||||
po = bp['plot_outline']
|
||||
if 'beats' in po and isinstance(po['beats'], list):
|
||||
events = []
|
||||
for act in po['beats']:
|
||||
if 'plot_points' in act and isinstance(act['plot_points'], list):
|
||||
for pp in act['plot_points']:
|
||||
desc = pp.get('description')
|
||||
point = pp.get('point', 'Event')
|
||||
if desc: events.append({"description": desc, "purpose": point})
|
||||
if events:
|
||||
utils.log("ARCHITECT", f"Using {len(events)} events from Plot Outline as base structure.")
|
||||
return events
|
||||
|
||||
structure_type = bp.get('book_metadata', {}).get('structure_prompt')
|
||||
|
||||
if not structure_type:
|
||||
@@ -183,13 +169,6 @@ def plan_structure(bp, folder):
|
||||
structure_type = structures.get(label, "Create a 3-Act Structure.")
|
||||
|
||||
beats_context = []
|
||||
if 'plot_outline' in bp and isinstance(bp['plot_outline'], dict):
|
||||
po = bp['plot_outline']
|
||||
if 'beats' in po:
|
||||
for act in po['beats']:
|
||||
beats_context.append(f"ACT {act.get('act', '?')}: {act.get('title', '')} - {act.get('summary', '')}")
|
||||
for pp in act.get('plot_points', []):
|
||||
beats_context.append(f" * {pp.get('point', 'Beat')}: {pp.get('description', '')}")
|
||||
|
||||
if not beats_context:
|
||||
beats_context = bp.get('plot_beats', [])
|
||||
@@ -206,13 +185,6 @@ def expand(events, pass_num, target_chapters, bp, folder):
|
||||
utils.log("ARCHITECT", f"Expansion pass {pass_num} | Current Beats: {len(events)} | Target Chaps: {target_chapters}")
|
||||
|
||||
beats_context = []
|
||||
if 'plot_outline' in bp and isinstance(bp['plot_outline'], dict):
|
||||
po = bp['plot_outline']
|
||||
if 'beats' in po:
|
||||
for act in po['beats']:
|
||||
beats_context.append(f"ACT {act.get('act', '?')}: {act.get('title', '')} - {act.get('summary', '')}")
|
||||
for pp in act.get('plot_points', []):
|
||||
beats_context.append(f" * {pp.get('point', 'Beat')}: {pp.get('description', '')}")
|
||||
|
||||
if not beats_context:
|
||||
beats_context = bp.get('plot_beats', [])
|
||||
@@ -565,7 +537,8 @@ def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None):
|
||||
|
||||
prev_context_block = ""
|
||||
if prev_content:
|
||||
prev_context_block = f"\nPREVIOUS CHAPTER TEXT (For Tone & Continuity):\n{prev_content}\n"
|
||||
trunc_content = prev_content[-3000:] if len(prev_content) > 3000 else prev_content
|
||||
prev_context_block = f"\nPREVIOUS CHAPTER TEXT (For Tone & Continuity):\n{trunc_content}\n"
|
||||
|
||||
prompt = f"""
|
||||
Write Chapter {chap['chapter_number']}: {chap['title']}
|
||||
@@ -617,7 +590,10 @@ def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None):
|
||||
return f"## Chapter {chap['chapter_number']} Failed\n\nError: {e}"
|
||||
|
||||
# Refinement Loop
|
||||
max_attempts = 9
|
||||
max_attempts = 3
|
||||
SCORE_AUTO_ACCEPT = 9
|
||||
SCORE_PASSING = 7
|
||||
|
||||
best_score = 0
|
||||
best_text = current_text
|
||||
past_critiques = []
|
||||
@@ -635,8 +611,8 @@ def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None):
|
||||
|
||||
utils.log("WRITER", f" Score: {score}/10. Critique: {critique}")
|
||||
|
||||
if score >= 8:
|
||||
utils.log("WRITER", " Quality threshold met.")
|
||||
if score >= SCORE_AUTO_ACCEPT:
|
||||
utils.log("WRITER", " 🌟 Auto-Accept threshold met.")
|
||||
return current_text
|
||||
|
||||
if score > best_score:
|
||||
@@ -644,8 +620,12 @@ def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None):
|
||||
best_text = current_text
|
||||
|
||||
if attempt == max_attempts:
|
||||
utils.log("WRITER", " Max attempts reached. Using best version.")
|
||||
return best_text
|
||||
if best_score >= SCORE_PASSING:
|
||||
utils.log("WRITER", f" ✅ Max attempts reached. Accepting best score ({best_score}).")
|
||||
return best_text
|
||||
else:
|
||||
utils.log("WRITER", f" ⚠️ Quality low ({best_score}/{SCORE_PASSING}) but max attempts reached. Proceeding.")
|
||||
return best_text
|
||||
|
||||
utils.log("WRITER", f" -> Refining Ch {chap['chapter_number']} based on feedback...")
|
||||
|
||||
@@ -692,8 +672,15 @@ def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None):
|
||||
|
||||
def harvest_metadata(bp, folder, full_manuscript):
|
||||
utils.log("HARVESTER", "Scanning for new characters...")
|
||||
full_text = "\n".join([c['content'] for c in full_manuscript])[:50000]
|
||||
prompt = f"Identify new significant characters NOT in:\n{json.dumps(bp['characters'])}\nTEXT:\n{full_text}\nReturn JSON: {{'new_characters': [{{'name':'...', 'role':'...', 'description':'...'}}]}}"
|
||||
full_text = "\n".join([c.get('content', '') for c in full_manuscript])[:500000]
|
||||
|
||||
prompt = f"""
|
||||
Analyze this manuscript text.
|
||||
EXISTING CHARACTERS: {json.dumps(bp['characters'])}
|
||||
|
||||
TASK: Identify NEW significant characters that appear in the text but are missing from the list.
|
||||
RETURN JSON: {{'new_characters': [{{'name':'...', 'role':'...', 'description':'...'}}]}}
|
||||
"""
|
||||
try:
|
||||
response = ai.model_logic.generate_content(prompt)
|
||||
utils.log_usage(folder, "logic-pro", response.usage_metadata)
|
||||
@@ -722,7 +709,7 @@ def update_persona_sample(bp, folder):
|
||||
if not os.path.exists(config.PERSONAS_DIR): os.makedirs(config.PERSONAS_DIR)
|
||||
|
||||
meta = bp.get('book_metadata', {})
|
||||
safe_title = "".join([c for c in meta.get('title', 'book') if c.isalnum() or c=='_']).replace(" ", "_")[:20]
|
||||
safe_title = utils.sanitize_filename(meta.get('title', 'book'))[:20]
|
||||
timestamp = int(time.time())
|
||||
filename = f"sample_{safe_title}_{timestamp}.txt"
|
||||
filepath = os.path.join(config.PERSONAS_DIR, filename)
|
||||
@@ -781,4 +768,236 @@ def refine_bible(bible, instruction, folder):
|
||||
return new_data
|
||||
except Exception as e:
|
||||
utils.log("SYSTEM", f"Refinement failed: {e}")
|
||||
return None
|
||||
return None
|
||||
|
||||
def analyze_consistency(bp, manuscript, folder):
    """Scan the manuscript for continuity errors using the logic model.

    Args:
        bp: The book blueprint dict (characters are read from it).
        manuscript: List of chapter dicts with 'num' and 'content' keys.
        folder: Book folder path, used for usage logging.

    Returns:
        A dict of the shape {"issues": [...], "score": int, "summary": str}.
        Missing inputs or an API/parse failure yield a zero-score result
        with an explanatory issue instead of raising.
    """
    utils.log("EDITOR", "Analyzing manuscript for continuity errors...")

    # Guard clauses: nothing to analyze without both inputs.
    if not manuscript: return {"issues": ["No manuscript found."], "score": 0}
    if not bp: return {"issues": ["No blueprint found."], "score": 0}

    # Summarize chapters to save tokens (pass full text if small enough, but usually summaries are safer)
    chapter_summaries = []
    for ch in manuscript:
        text = ch.get('content', '')
        # Take first 1000 and last 1000 chars to capture setup and resolution of scenes
        excerpt = text[:1000] + "\n...\n" + text[-1000:] if len(text) > 2000 else text
        chapter_summaries.append(f"Ch {ch.get('num')}: {excerpt}")

    context = "\n".join(chapter_summaries)

    prompt = f"""
Act as a Continuity Editor. Analyze this book summary for plot holes and inconsistencies.

CHARACTERS: {json.dumps(bp.get('characters', []))}

CHAPTER SUMMARIES:
{context}

TASK:
Identify 3-5 major continuity errors or plot holes (e.g. dead characters appearing, teleporting, forgotten injuries, motivation flips).
If none, say "No major issues found."

Return JSON: {{ "issues": ["Issue 1", "Issue 2"], "score": 8, "summary": "Brief overall assessment." }} (Score 1-10 on logical consistency)
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        # Record token spend against the logic-tier model label.
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        # Surface the failure as a result object rather than raising,
        # so callers can display it like a normal analysis outcome.
        return {"issues": [f"Analysis failed: {e}"], "score": 0, "summary": "Error during analysis."}
|
||||
|
||||
def rewrite_chapter_content(bp, manuscript, chapter_num, instruction, folder):
    """Rewrite a single chapter per a user instruction, preserving continuity.

    Args:
        bp: Book blueprint dict (metadata and characters are read from it).
        manuscript: List of chapter dicts with 'num', 'title', 'content'.
        chapter_num: Chapter identifier — an int, or a string like "Epilogue".
        instruction: Free-form user directive that overrides the current draft.
        folder: Book folder path, used for usage logging.

    Returns:
        The rewritten chapter text (Markdown), or None if the chapter is
        missing from the manuscript or generation fails.
    """
    utils.log("WRITER", f"Rewriting Ch {chapter_num} with instruction: {instruction}")

    # Find target chapter and previous context
    target_chap = next((c for c in manuscript if c['num'] == chapter_num), None)
    if not target_chap: return None

    prev_text = ""

    # Determine previous chapter logic
    prev_chap = None
    if isinstance(chapter_num, int):
        # Numeric chapters: the predecessor is simply N-1.
        prev_chap = next((c for c in manuscript if c['num'] == chapter_num - 1), None)
    elif str(chapter_num).lower() == "epilogue":
        # Find the highest numbered chapter
        numbered_chaps = [c for c in manuscript if isinstance(c['num'], int)]
        if numbered_chaps:
            prev_chap = max(numbered_chaps, key=lambda x: x['num'])

    if prev_chap:
        prev_text = prev_chap.get('content', '')[-3000:] # Last 3000 chars for context

    meta = bp.get('book_metadata', {})

    prompt = f"""
Act as a Ghostwriter. Rewrite Chapter {chapter_num}: {target_chap.get('title', '')}

USER INSTRUCTION (PRIMARY DIRECTIVE):
{instruction}

STORY CONTEXT:
- Title: {meta.get('title')}
- Genre: {meta.get('genre')}
- Tone: {meta.get('style', {}).get('tone')}

PREVIOUS CHAPTER ENDING (Continuity):
{prev_text}

CURRENT DRAFT (Reference only - feel free to change significantly based on instruction):
{target_chap.get('content', '')[:5000]}

CHARACTERS:
{json.dumps(bp.get('characters', []))}

TASK:
Write the full chapter content in Markdown.
- Ensure it flows naturally from the previous chapter ending.
- Follow the User Instruction strictly, even if it contradicts the current draft.
- Maintain the established character voices.
"""

    try:
        response = ai.model_writer.generate_content(prompt)
        # Record token spend against the writer-tier model label.
        utils.log_usage(folder, "writer-flash", response.usage_metadata)
        return response.text
    except Exception as e:
        utils.log("WRITER", f"Rewrite failed: {e}")
        return None
|
||||
|
||||
def check_and_propagate(bp, manuscript, changed_chap_num, folder):
    """Propagate a chapter rewrite through subsequent chapters.

    Walks chapters after `changed_chap_num` in story order, asking the writer
    model whether each one still fits the new context and rewriting it when
    it does not. Once two consecutive chapters need no change, a long-range
    scan (logic model) flags any distant chapters that may still be impacted;
    unflagged numbered chapters are then skipped. The manuscript is saved to
    disk after every rewrite so partial progress survives later failures.

    Returns:
        The (mutated) manuscript list if any chapter was rewritten,
        otherwise None.
    """
    utils.log("WRITER", f"Checking ripple effects from Ch {changed_chap_num}...")

    # Find the changed chapter
    changed_chap = next((c for c in manuscript if c['num'] == changed_chap_num), None)
    if not changed_chap: return None

    # Summarize the change to save tokens
    change_summary_prompt = f"Summarize the key events and ending state of this chapter:\n{changed_chap.get('content', '')[:10000]}"
    try:
        resp = ai.model_writer.generate_content(change_summary_prompt)
        current_context = resp.text
    except:
        current_context = changed_chap.get('content', '')[-2000:] # Fallback

    # Keep the original summary for the long-range scan even after
    # current_context is rolled forward chapter by chapter.
    original_change_context = current_context
    # Iterate subsequent chapters
    sorted_ms = sorted(manuscript, key=utils.chapter_sort_key)
    start_index = -1
    for i, c in enumerate(sorted_ms):
        # str() comparison so int 5 matches string "5" chapter ids.
        if str(c['num']) == str(changed_chap_num):
            start_index = i
            break

    # Nothing to propagate if the chapter wasn't found or is the last one.
    if start_index == -1 or start_index == len(sorted_ms) - 1:
        return None

    changes_made = False
    consecutive_no_changes = 0
    potential_impact_chapters = []

    for i in range(start_index + 1, len(sorted_ms)):
        target_chap = sorted_ms[i]

        # Optimization: If 2 chapters in a row didn't need changes, assume the ripple has stopped locally.
        # Perform Long-Range Scan to see if we need to jump ahead.
        if consecutive_no_changes >= 2:
            if target_chap['num'] not in potential_impact_chapters:
                # Check if we have pending future flags
                future_flags = [n for n in potential_impact_chapters if isinstance(n, int) and isinstance(target_chap['num'], int) and n > target_chap['num']]

                if not future_flags:
                    # No pending flags. Scan remaining chapters.
                    remaining_chaps = sorted_ms[i:]
                    if not remaining_chaps: break

                    utils.log("WRITER", " -> Short-term ripple dissipated. Scanning remaining chapters for long-range impacts...")

                    chapter_summaries = []
                    for rc in remaining_chaps:
                        text = rc.get('content', '')
                        excerpt = text[:500] + "\n...\n" + text[-500:] if len(text) > 1000 else text
                        chapter_summaries.append(f"Ch {rc['num']}: {excerpt}")

                    scan_prompt = f"""
We are propagating a change from Chapter {changed_chap_num}.
The immediate ripple effect seems to have stopped.

ORIGINAL CHANGE CONTEXT:
{original_change_context}

REMAINING CHAPTERS:
{json.dumps(chapter_summaries)}

TASK:
Identify any later chapters that mention items, characters, or locations involved in the Change Context.
Return a JSON list of Chapter Numbers (integers) that might need updating.
Example: [5, 12]
If none, return [].
"""

                    try:
                        resp = ai.model_logic.generate_content(scan_prompt)
                        potential_impact_chapters = json.loads(utils.clean_json(resp.text))
                        if not isinstance(potential_impact_chapters, list): potential_impact_chapters = []
                        # Ensure integers
                        potential_impact_chapters = [int(x) for x in potential_impact_chapters if str(x).isdigit()]
                    except Exception as e:
                        utils.log("WRITER", f" -> Scan failed: {e}. Stopping.")
                        break

                    if not potential_impact_chapters:
                        utils.log("WRITER", " -> No long-range impacts detected. Stopping.")
                        break
                    else:
                        utils.log("WRITER", f" -> Detected potential impact in chapters: {potential_impact_chapters}")

            # If current chapter is still not in the list, skip it
            # Safety: Always check non-integer chapters (Prologue/Epilogue) to be safe
            if isinstance(target_chap['num'], int) and target_chap['num'] not in potential_impact_chapters:
                utils.log("WRITER", f" -> Skipping Ch {target_chap['num']} (Not flagged).")
                continue

        utils.log("WRITER", f" -> Checking Ch {target_chap['num']} for continuity...")

        prompt = f"""
Chapter {changed_chap_num} was just rewritten.
NEW CONTEXT/ENDING of previous section:
{current_context}

CURRENT TEXT of Ch {target_chap['num']}:
{target_chap['content'][:5000]}... (truncated)

TASK:
Does Ch {target_chap['num']} need to be rewritten to maintain continuity with the new context?
- If YES (e.g. references old events that changed, character states don't match): Rewrite the chapter fully in Markdown.
- If NO (it fits fine): Return ONLY the string "NO_CHANGE".
"""

        try:
            response = ai.model_writer.generate_content(prompt)
            text = response.text.strip()

            # Length guard prevents treating a full rewrite that merely
            # mentions "NO_CHANGE" as a no-op.
            if "NO_CHANGE" in text[:20] and len(text) < 100:
                utils.log("WRITER", f" -> Ch {target_chap['num']} is consistent.")
                # Update context for next iteration using existing text
                current_context = f"Ch {target_chap['num']} Summary: " + target_chap.get('content', '')[-2000:]
                consecutive_no_changes += 1
            else:
                utils.log("WRITER", f" -> Rewriting Ch {target_chap['num']} to fix continuity.")
                target_chap['content'] = text
                changes_made = True
                # Update context with NEW text
                current_context = f"Ch {target_chap['num']} Summary: " + text[-2000:]
                consecutive_no_changes = 0

                # Save immediately to prevent data loss if subsequent checks fail
                try:
                    with open(os.path.join(folder, "manuscript.json"), 'w') as f: json.dump(manuscript, f, indent=2)
                except: pass

        except Exception as e:
            utils.log("WRITER", f" -> Check failed: {e}")

    return manuscript if changes_made else None
|
||||
@@ -21,6 +21,14 @@ def set_log_file(filepath):
|
||||
def set_log_callback(callback):
|
||||
_log_context.callback = callback
|
||||
|
||||
def set_progress_callback(callback):
    """Register a callable that receives a percentage on progress updates.

    Stored on the thread-local log context alongside the log callback.
    """
    _log_context.progress_callback = callback


def update_progress(percent):
    """Invoke the registered progress callback with *percent*, if any.

    Best-effort by design: callback errors are swallowed so progress
    reporting can never abort generation.
    """
    if getattr(_log_context, 'progress_callback', None):
        try:
            _log_context.progress_callback(percent)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; callback failures remain non-fatal.
            pass
|
||||
|
||||
def clean_json(text):
|
||||
text = text.replace("```json", "").replace("```", "").strip()
|
||||
# Robust extraction: find first { or [ and last } or ]
|
||||
@@ -32,6 +40,32 @@ def clean_json(text):
|
||||
else:
|
||||
return text[start_arr:text.rfind(']')+1]
|
||||
|
||||
def sanitize_filename(name):
    """Sanitize *name* into a filesystem-safe identifier.

    Keeps alphanumerics and underscores, converts spaces to underscores,
    and drops every other character. Returns "Untitled" when the input is
    empty or nothing survives filtering.
    """
    if not name: return "Untitled"
    # Keep spaces during filtering so they can become underscores. The
    # original filtered them out first, which made its trailing
    # .replace(" ", "_") dead code ("My Book" -> "MyBook").
    safe = "".join(c for c in name if c.isalnum() or c in ('_', ' ')).replace(" ", "_")
    return safe if safe else "Untitled"
|
||||
|
||||
def chapter_sort_key(ch):
    """Sort key for chapters handling integers, strings, Prologue, and Epilogue."""
    raw = ch.get('num', 0)
    if isinstance(raw, int):
        return raw
    if isinstance(raw, str) and raw.isdigit():
        return int(raw)
    label = str(raw).lower().strip()
    if 'prologue' in label:
        return -1
    # Epilogue sorts last; any other non-numeric label lands just before it.
    return 9999 if 'epilogue' in label else 999
|
||||
|
||||
def get_sorted_book_folders(run_dir):
    """Returns a list of book folder names in a run directory, sorted numerically."""
    if not os.path.exists(run_dir):
        return []

    def book_number(name):
        # "Book_12" -> 12; anything unparsable sorts first as 0.
        pieces = name.split('_')
        return int(pieces[1]) if len(pieces) > 1 and pieces[1].isdigit() else 0

    folders = []
    for entry in os.listdir(run_dir):
        if entry.startswith("Book_") and os.path.isdir(os.path.join(run_dir, entry)):
            folders.append(entry)
    return sorted(folders, key=book_number)
|
||||
|
||||
# --- SHARED UTILS ---
|
||||
def log(phase, msg):
|
||||
timestamp = datetime.datetime.now().strftime('%H:%M:%S')
|
||||
@@ -158,45 +192,4 @@ def log_usage(folder, model_label, usage_metadata=None, image_count=0):
|
||||
"est_cost_usd": round(cost, 4)
|
||||
}
|
||||
|
||||
with open(log_path, 'w') as f: json.dump(data, f, indent=2)
|
||||
|
||||
def normalize_settings(bp):
    """
    CRITICAL: Enforces defaults.
    1. If series_metadata is missing, force it to SINGLE mode.
    2. If length_settings is missing, force explicit numbers.
    """
    # Default to a standalone, single-book series when nothing was specified.
    if 'series_metadata' not in bp:
        bp['series_metadata'] = {
            "is_series": False,
            "mode": "single",
            "series_title": "Standalone",
            "total_books_to_generate": 1
        }

    # Guard against an explicit null book count.
    if bp['series_metadata'].get('total_books_to_generate') is None:
        bp['series_metadata']['total_books_to_generate'] = 1

    # Fill in explicit length numbers from the preset matching the label.
    settings = bp.get('length_settings', {})
    label = settings.get('label', 'Novella')  # Novella is the fallback preset

    presets = get_length_presets()
    defaults = presets.get(label, presets['Novella'])

    settings.setdefault('chapters', defaults['chapters'])
    settings.setdefault('words', defaults['words'])

    # Derive outline depth from chapter count unless set manually.
    if 'depth' not in settings:
        count = int(settings['chapters'])
        if count <= 5:
            settings['depth'] = 1
        elif count <= 20:
            settings['depth'] = 2
        elif count <= 40:
            settings['depth'] = 3
        else:
            settings['depth'] = 4

    bp['length_settings'] = settings
    return bp
|
||||
with open(log_path, 'w') as f: json.dump(data, f, indent=2)
|
||||
@@ -1,20 +1,22 @@
|
||||
import os
|
||||
import json
|
||||
import html
|
||||
import shutil
|
||||
import markdown
|
||||
from functools import wraps
|
||||
from types import SimpleNamespace
|
||||
from datetime import datetime, timedelta
|
||||
from sqlalchemy import func
|
||||
from urllib.parse import urlparse, urljoin
|
||||
from sqlalchemy import func, text
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
from flask import Flask, render_template, request, redirect, url_for, flash, send_from_directory, session
|
||||
from flask_login import LoginManager, login_user, login_required, logout_user, current_user
|
||||
from werkzeug.security import generate_password_hash, check_password_hash
|
||||
from .web_db import db, User, Project, Run, LogEntry
|
||||
from .web_tasks import huey, generate_book_task, regenerate_artifacts_task
|
||||
from .web_tasks import huey, generate_book_task, regenerate_artifacts_task, rewrite_chapter_task
|
||||
import config
|
||||
from . import utils
|
||||
from . import ai
|
||||
from . import story
|
||||
from . import export
|
||||
|
||||
# Calculate paths relative to this file (modules/web_app.py)
|
||||
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
@@ -35,85 +37,9 @@ login_manager.init_app(app)
|
||||
def load_user(user_id):
|
||||
return db.session.get(User, int(user_id))
|
||||
|
||||
def migrate_logs():
    """Parses old log files and inserts them into the database.

    One-time startup migration: for each Run, locate its legacy text log
    (recorded path or known fallback locations), parse lines of the form
    "[HH:MM:SS] PHASE | Message" into LogEntry rows, commit them, and
    delete log files whose contents are already represented in the DB.
    Failures for individual runs are printed and skipped.
    """
    runs = Run.query.all()
    migrated = 0
    files_to_clean = []
    for run in runs:
        # Check if DB logs exist
        has_db_logs = LogEntry.query.filter_by(run_id=run.id).first() is not None

        # Locate Log File
        log_path = run.log_file
        if not log_path or not os.path.exists(log_path):
            # Try common fallback locations
            candidates = [
                os.path.join(run.project.folder_path, f"system_log_{run.id}.txt"),
                os.path.join(run.project.folder_path, "runs", "bible", f"run_{run.id}", "web_console.log")
            ]
            for c in candidates:
                if os.path.exists(c):
                    log_path = c
                    break

        if log_path and os.path.exists(log_path):
            if has_db_logs:
                # Logs are already in DB (New Run or previous migration). Mark file for cleanup.
                files_to_clean.append(log_path)
                continue

            try:
                with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
                    lines = f.readlines()

                entries = []
                for line in lines:
                    # Parse standard log format: [HH:MM:SS] PHASE | Message
                    if '|' in line and line.strip().startswith('['):
                        try:
                            parts = line.split('|', 1)
                            meta = parts[0].strip()
                            msg = parts[1].strip()

                            if ']' in meta:
                                ts_str = meta[1:meta.find(']')]
                                phase = meta[meta.find(']')+1:].strip()

                                # Reconstruct datetime: the file stores only a
                                # time of day, so anchor it to the run's start date.
                                base_date = run.start_time.date() if run.start_time else datetime.utcnow().date()
                                t_time = datetime.strptime(ts_str, "%H:%M:%S").time()
                                dt = datetime.combine(base_date, t_time)

                                entries.append(LogEntry(run_id=run.id, timestamp=dt, phase=phase, message=msg))
                        except: continue

                if entries:
                    db.session.add_all(entries)
                    migrated += 1
                # File parsed (or empty): safe to remove after commit below.
                files_to_clean.append(log_path)
            except Exception as e:
                print(f"Migration failed for Run {run.id}: {e}")

    if migrated > 0:
        db.session.commit()
        print(f"✅ Migrated logs for {migrated} runs to Database.")

    # Cleanup files (even if no new migrations happened)
    if files_to_clean:
        count = 0
        for fpath in files_to_clean:
            try:
                os.remove(fpath)
                count += 1
            except: pass
        if count > 0:
            print(f"🧹 Cleaned up {count} redundant log files.")
|
||||
|
||||
# --- SETUP ---
|
||||
with app.app_context():
|
||||
db.create_all()
|
||||
migrate_logs()
|
||||
|
||||
# Auto-create Admin from Environment Variables (Docker/Portainer Setup)
|
||||
if config.ADMIN_USER and config.ADMIN_PASSWORD:
|
||||
@@ -126,6 +52,26 @@ with app.app_context():
|
||||
elif not admin.is_admin:
|
||||
admin.is_admin = True
|
||||
db.session.commit()
|
||||
|
||||
# Migration: Add 'progress' column if missing
|
||||
try:
|
||||
with db.engine.connect() as conn:
|
||||
conn.execute(text("ALTER TABLE run ADD COLUMN progress INTEGER DEFAULT 0"))
|
||||
conn.commit()
|
||||
print("✅ System: Added 'progress' column to Run table.")
|
||||
except: pass
|
||||
|
||||
# Reset stuck runs on startup
|
||||
try:
|
||||
stuck_runs = Run.query.filter_by(status='running').all()
|
||||
if stuck_runs:
|
||||
print(f"⚠️ System: Found {len(stuck_runs)} stuck runs. Resetting to 'failed'.")
|
||||
for r in stuck_runs:
|
||||
r.status = 'failed'
|
||||
r.end_time = datetime.utcnow()
|
||||
db.session.commit()
|
||||
except Exception as e:
|
||||
print(f"⚠️ System: Failed to clean up stuck runs: {e}")
|
||||
|
||||
# --- DECORATORS ---
|
||||
def admin_required(f):
|
||||
@@ -137,6 +83,16 @@ def admin_required(f):
|
||||
return f(*args, **kwargs)
|
||||
return decorated_function
|
||||
|
||||
def is_project_locked(project_id):
    """Returns True if the project has any completed runs (Book 1 written)."""
    completed = Run.query.filter_by(project_id=project_id, status='completed')
    return completed.count() > 0
|
||||
|
||||
def is_safe_url(target):
    """Return True if *target* resolves to an http(s) URL on our own host.

    Used to validate ?next= redirect targets and prevent open redirects.
    """
    host = request.host_url
    current = urlparse(host)
    candidate = urlparse(urljoin(host, target))
    same_host = current.netloc == candidate.netloc
    return candidate.scheme in ('http', 'https') and same_host
|
||||
|
||||
# --- ROUTES ---
|
||||
|
||||
@app.route('/')
|
||||
@@ -153,7 +109,10 @@ def login():
|
||||
user = User.query.filter_by(username=username).first()
|
||||
if user and check_password_hash(user.password, password):
|
||||
login_user(user)
|
||||
return redirect(url_for('index'))
|
||||
next_page = request.args.get('next')
|
||||
if not next_page or not is_safe_url(next_page):
|
||||
next_page = url_for('index')
|
||||
return redirect(next_page)
|
||||
flash('Invalid credentials')
|
||||
return render_template('login.html')
|
||||
|
||||
@@ -167,10 +126,18 @@ def register():
|
||||
return redirect(url_for('register'))
|
||||
|
||||
new_user = User(username=username, password=generate_password_hash(password, method='pbkdf2:sha256'))
|
||||
db.session.add(new_user)
|
||||
db.session.commit()
|
||||
login_user(new_user)
|
||||
return redirect(url_for('index'))
|
||||
# Auto-promote if matches env var
|
||||
if config.ADMIN_USER and username == config.ADMIN_USER:
|
||||
new_user.is_admin = True
|
||||
try:
|
||||
db.session.add(new_user)
|
||||
db.session.commit()
|
||||
login_user(new_user)
|
||||
return redirect(url_for('index'))
|
||||
except IntegrityError:
|
||||
db.session.rollback()
|
||||
flash('Username exists')
|
||||
return redirect(url_for('register'))
|
||||
return render_template('register.html')
|
||||
|
||||
@app.route('/project/setup', methods=['POST'])
|
||||
@@ -280,16 +247,16 @@ def project_setup_refine():
|
||||
@login_required
|
||||
def create_project_final():
|
||||
title = request.form.get('title')
|
||||
safe_title = "".join([c for c in title if c.isalnum() or c=='_']).replace(" ", "_")
|
||||
safe_title = utils.sanitize_filename(title)
|
||||
|
||||
user_dir = os.path.join(config.DATA_DIR, "users", str(current_user.id))
|
||||
if not os.path.exists(user_dir): os.makedirs(user_dir)
|
||||
os.makedirs(user_dir, exist_ok=True)
|
||||
|
||||
proj_path = os.path.join(user_dir, safe_title)
|
||||
if os.path.exists(proj_path):
|
||||
safe_title += f"_{int(datetime.utcnow().timestamp())}"
|
||||
proj_path = os.path.join(user_dir, safe_title)
|
||||
os.makedirs(proj_path)
|
||||
os.makedirs(proj_path, exist_ok=True)
|
||||
|
||||
# Construct Bible from Form Data
|
||||
length_cat = request.form.get('length_category')
|
||||
@@ -349,12 +316,11 @@ def create_project_final():
|
||||
"plot_beats": []
|
||||
})
|
||||
|
||||
# Enrich via AI immediately if concept exists
|
||||
if concept:
|
||||
try:
|
||||
ai.init_models()
|
||||
bible = story.enrich(bible, proj_path)
|
||||
except: pass
|
||||
# Enrich via AI immediately (Always, to ensure Bible is full)
|
||||
try:
|
||||
ai.init_models()
|
||||
bible = story.enrich(bible, proj_path)
|
||||
except: pass
|
||||
|
||||
with open(os.path.join(proj_path, "bible.json"), 'w') as f:
|
||||
json.dump(bible, f, indent=2)
|
||||
@@ -365,6 +331,52 @@ def create_project_final():
|
||||
|
||||
return redirect(url_for('view_project', id=new_proj.id))
|
||||
|
||||
@app.route('/project/import', methods=['POST'])
@login_required
def import_project():
    """Create a new project from an uploaded bible.json file."""
    # Reject requests that carry no upload field at all.
    if 'bible_file' not in request.files:
        flash('No file part')
        return redirect(url_for('index'))

    upload = request.files['bible_file']
    # Browsers submit an empty filename when no file was chosen.
    if upload.filename == '':
        flash('No selected file')
        return redirect(url_for('index'))

    if upload:
        try:
            bible = json.load(upload)
            # Minimal schema check before touching the filesystem.
            has_meta = 'project_metadata' in bible and 'title' in bible['project_metadata']
            if not has_meta:
                flash("Invalid Bible format: Missing project_metadata or title.")
                return redirect(url_for('index'))

            title = bible['project_metadata']['title']
            safe_title = utils.sanitize_filename(title)

            user_dir = os.path.join(config.DATA_DIR, "users", str(current_user.id))
            os.makedirs(user_dir, exist_ok=True)

            # De-duplicate the project folder with a timestamp suffix.
            proj_path = os.path.join(user_dir, safe_title)
            if os.path.exists(proj_path):
                safe_title += f"_{int(datetime.utcnow().timestamp())}"
                proj_path = os.path.join(user_dir, safe_title)
            os.makedirs(proj_path)

            with open(os.path.join(proj_path, "bible.json"), 'w') as f:
                json.dump(bible, f, indent=2)

            new_proj = Project(user_id=current_user.id, name=title, folder_path=proj_path)
            db.session.add(new_proj)
            db.session.commit()

            flash(f"Project '{title}' imported successfully.")
            return redirect(url_for('view_project', id=new_proj.id))

        except Exception as e:
            # Covers malformed JSON, filesystem and DB failures alike.
            flash(f"Import failed: {str(e)}")
            return redirect(url_for('index'))
|
||||
|
||||
@app.route('/project/<int:id>')
|
||||
@login_required
|
||||
def view_project(id):
|
||||
@@ -393,11 +405,12 @@ def view_project(id):
|
||||
artifacts = []
|
||||
cover_image = None
|
||||
generated_books = {} # Map book_number -> {status: 'generated', run_id: int, folder: str}
|
||||
locked = is_project_locked(id)
|
||||
|
||||
# Scan ALL completed runs to find the latest status of each book
|
||||
for r in runs:
|
||||
if r.status == 'completed':
|
||||
run_dir = os.path.join(proj.folder_path, "runs", "bible", f"run_{r.id}")
|
||||
run_dir = os.path.join(proj.folder_path, "runs", f"run_{r.id}")
|
||||
if os.path.exists(run_dir):
|
||||
# 1. Scan for Generated Books
|
||||
for d in os.listdir(run_dir):
|
||||
@@ -420,13 +433,13 @@ def view_project(id):
|
||||
|
||||
# Collect Artifacts from Latest Run
|
||||
if latest_run:
|
||||
run_dir = os.path.join(proj.folder_path, "runs", "bible", f"run_{latest_run.id}")
|
||||
run_dir = os.path.join(proj.folder_path, "runs", f"run_{latest_run.id}")
|
||||
if os.path.exists(run_dir):
|
||||
# Find Cover Image (Root or First Book)
|
||||
if os.path.exists(os.path.join(run_dir, "cover.png")):
|
||||
cover_image = "cover.png"
|
||||
else:
|
||||
subdirs = sorted([d for d in os.listdir(run_dir) if os.path.isdir(os.path.join(run_dir, d)) and d.startswith("Book_")])
|
||||
subdirs = utils.get_sorted_book_folders(run_dir)
|
||||
for d in subdirs:
|
||||
if os.path.exists(os.path.join(run_dir, d, "cover.png")):
|
||||
cover_image = os.path.join(d, "cover.png").replace("\\", "/")
|
||||
@@ -442,7 +455,7 @@ def view_project(id):
|
||||
'type': f.split('.')[-1].upper()
|
||||
})
|
||||
|
||||
return render_template('project.html', project=proj, bible=bible_data, runs=runs, active_run=latest_run, artifacts=artifacts, cover_image=cover_image, personas=personas, generated_books=generated_books, other_projects=other_projects)
|
||||
return render_template('project.html', project=proj, bible=bible_data, runs=runs, active_run=latest_run, artifacts=artifacts, cover_image=cover_image, personas=personas, generated_books=generated_books, other_projects=other_projects, locked=locked)
|
||||
|
||||
@app.route('/project/<int:id>/run', methods=['POST'])
|
||||
@login_required
|
||||
@@ -477,6 +490,10 @@ def update_project_metadata(id):
|
||||
proj = db.session.get(Project, id) or Project.query.get_or_404(id)
|
||||
if proj.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
if is_project_locked(id):
|
||||
flash("Project is locked. Clone it to make changes.")
|
||||
return redirect(url_for('view_project', id=id))
|
||||
|
||||
new_title = request.form.get('title')
|
||||
new_author = request.form.get('author')
|
||||
|
||||
@@ -495,12 +512,56 @@ def update_project_metadata(id):
|
||||
|
||||
return redirect(url_for('view_project', id=id))
|
||||
|
||||
@app.route('/project/<int:id>/clone', methods=['POST'])
@login_required
def clone_project(id):
    """Clone a project into a fresh folder, optionally refining its Bible.

    Form fields:
        new_name:    Title for the cloned project.
        instruction: Optional AI instruction applied to the copied Bible.

    Only the owner may clone. The clone gets its own folder under the
    user's data dir; a timestamp suffix avoids name collisions.
    """
    source_proj = db.session.get(Project, id) or Project.query.get_or_404(id)
    if source_proj.user_id != current_user.id: return "Unauthorized", 403

    new_name = request.form.get('new_name')
    instruction = request.form.get('instruction')

    # Create New Project folder (timestamp suffix on collision)
    safe_title = utils.sanitize_filename(new_name)
    user_dir = os.path.join(config.DATA_DIR, "users", str(current_user.id))
    new_path = os.path.join(user_dir, safe_title)
    if os.path.exists(new_path):
        safe_title += f"_{int(datetime.utcnow().timestamp())}"
        new_path = os.path.join(user_dir, safe_title)
    os.makedirs(new_path)

    # Copy Bible, retitled for the clone
    source_bible_path = os.path.join(source_proj.folder_path, "bible.json")
    if os.path.exists(source_bible_path):
        # Guard: load_json may return None for a corrupt file; previously
        # that crashed on the title assignment below.
        bible = utils.load_json(source_bible_path) or {}
        bible.setdefault('project_metadata', {})['title'] = new_name

        # Apply AI Instruction if provided (best-effort; clone still works
        # without AI, so failures are swallowed deliberately)
        if instruction:
            try:
                ai.init_models()
                bible = story.refine_bible(bible, instruction, new_path) or bible
            except Exception:
                pass  # narrowed from bare except so Ctrl-C/SystemExit propagate

        with open(os.path.join(new_path, "bible.json"), 'w') as f: json.dump(bible, f, indent=2)

    new_proj = Project(user_id=current_user.id, name=new_name, folder_path=new_path)
    db.session.add(new_proj)
    db.session.commit()

    flash(f"Project cloned as '{new_name}'.")
    return redirect(url_for('view_project', id=new_proj.id))
|
||||
|
||||
@app.route('/project/<int:id>/refine_bible', methods=['POST'])
|
||||
@login_required
|
||||
def refine_bible_route(id):
|
||||
proj = db.session.get(Project, id) or Project.query.get_or_404(id)
|
||||
if proj.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
if is_project_locked(id):
|
||||
flash("Project is locked. Clone it to make changes.")
|
||||
return redirect(url_for('view_project', id=id))
|
||||
|
||||
instruction = request.form.get('instruction')
|
||||
if not instruction:
|
||||
flash("Instruction required.")
|
||||
@@ -533,6 +594,10 @@ def add_book(id):
|
||||
proj = db.session.get(Project, id) or Project.query.get_or_404(id)
|
||||
if proj.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
if is_project_locked(id):
|
||||
flash("Project is locked. Clone it to make changes.")
|
||||
return redirect(url_for('view_project', id=id))
|
||||
|
||||
title = request.form.get('title', 'Untitled')
|
||||
instruction = request.form.get('instruction', '')
|
||||
|
||||
@@ -566,6 +631,10 @@ def update_book_details(id, book_num):
|
||||
proj = db.session.get(Project, id) or Project.query.get_or_404(id)
|
||||
if proj.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
if is_project_locked(id):
|
||||
flash("Project is locked. Clone it to make changes.")
|
||||
return redirect(url_for('view_project', id=id))
|
||||
|
||||
new_title = request.form.get('title')
|
||||
new_instruction = request.form.get('instruction')
|
||||
|
||||
@@ -576,7 +645,7 @@ def update_book_details(id, book_num):
|
||||
for b in bible['books']:
|
||||
if b.get('book_number') == book_num:
|
||||
if new_title: b['title'] = new_title
|
||||
if new_instruction: b['manual_instruction'] = new_instruction
|
||||
if new_instruction is not None: b['manual_instruction'] = new_instruction
|
||||
break
|
||||
with open(bible_path, 'w') as f: json.dump(bible, f, indent=2)
|
||||
flash(f"Book {book_num} updated.")
|
||||
@@ -589,6 +658,10 @@ def delete_book(id, book_num):
|
||||
proj = db.session.get(Project, id) or Project.query.get_or_404(id)
|
||||
if proj.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
if is_project_locked(id):
|
||||
flash("Project is locked. Clone it to make changes.")
|
||||
return redirect(url_for('view_project', id=id))
|
||||
|
||||
bible_path = os.path.join(proj.folder_path, "bible.json")
|
||||
bible = utils.load_json(bible_path)
|
||||
|
||||
@@ -617,6 +690,10 @@ def import_characters(id):
|
||||
if not target_proj or not source_proj: return "Project not found", 404
|
||||
if target_proj.user_id != current_user.id or source_proj.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
if is_project_locked(id):
|
||||
flash("Project is locked. Clone it to make changes.")
|
||||
return redirect(url_for('view_project', id=id))
|
||||
|
||||
target_bible = utils.load_json(os.path.join(target_proj.folder_path, "bible.json"))
|
||||
source_bible = utils.load_json(os.path.join(source_proj.folder_path, "bible.json"))
|
||||
|
||||
@@ -644,6 +721,10 @@ def set_project_persona(id):
|
||||
proj = db.session.get(Project, id) or Project.query.get_or_404(id)
|
||||
if proj.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
if is_project_locked(id):
|
||||
flash("Project is locked. Clone it to make changes.")
|
||||
return redirect(url_for('view_project', id=id))
|
||||
|
||||
persona_name = request.form.get('persona_name')
|
||||
|
||||
bible_path = os.path.join(proj.folder_path, "bible.json")
|
||||
@@ -671,11 +752,14 @@ def regenerate_artifacts(run_id):
|
||||
run = db.session.get(Run, run_id) or Run.query.get_or_404(run_id)
|
||||
if run.project.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
if run.status == 'running':
|
||||
flash("Run is already active. Please wait for it to finish.")
|
||||
return redirect(url_for('view_run', id=run_id))
|
||||
|
||||
feedback = request.form.get('feedback')
|
||||
|
||||
# Reset state immediately so UI polls correctly
|
||||
run.status = 'queued'
|
||||
LogEntry.query.filter_by(run_id=run_id).delete()
|
||||
db.session.commit()
|
||||
|
||||
regenerate_artifacts_task(run_id, run.project.folder_path, feedback=feedback)
|
||||
@@ -692,6 +776,12 @@ def stop_run(id):
|
||||
run.status = 'cancelled'
|
||||
run.end_time = datetime.utcnow()
|
||||
db.session.commit()
|
||||
|
||||
# Signal the backend process to stop by creating a .stop file
|
||||
run_dir = os.path.join(run.project.folder_path, "runs", f"run_{run.id}")
|
||||
if os.path.exists(run_dir):
|
||||
with open(os.path.join(run_dir, ".stop"), 'w') as f: f.write("stop")
|
||||
|
||||
flash(f"Run {id} marked as cancelled.")
|
||||
|
||||
return redirect(url_for('view_project', id=run.project_id))
|
||||
@@ -709,10 +799,14 @@ def restart_run(id):
|
||||
|
||||
# Check mode: 'resume' (default) vs 'restart'
|
||||
mode = request.form.get('mode', 'resume')
|
||||
allow_copy = (mode == 'resume')
|
||||
feedback = request.form.get('feedback')
|
||||
keep_cover = 'keep_cover' in request.form
|
||||
force_regen = 'force_regenerate' in request.form
|
||||
allow_copy = (mode == 'resume' and not force_regen)
|
||||
if feedback: allow_copy = False # Force regeneration if feedback provided to ensure changes are applied
|
||||
|
||||
task = generate_book_task(new_run.id, run.project.folder_path, os.path.join(run.project.folder_path, "bible.json"), allow_copy=allow_copy)
|
||||
flash(f"Started new Run #{new_run.id}.")
|
||||
task = generate_book_task(new_run.id, run.project.folder_path, os.path.join(run.project.folder_path, "bible.json"), allow_copy=allow_copy, feedback=feedback, source_run_id=id if feedback else None, keep_cover=keep_cover)
|
||||
flash(f"Started new Run #{new_run.id}" + (" with modifications." if feedback else "."))
|
||||
return redirect(url_for('view_project', id=run.project_id))
|
||||
|
||||
@app.route('/run/<int:id>')
|
||||
@@ -732,13 +826,12 @@ def view_run(id):
|
||||
with open(run.log_file, 'r') as f: log_content = f.read()
|
||||
|
||||
# Fetch Artifacts for Display
|
||||
run_dir = os.path.join(run.project.folder_path, "runs", "bible", f"run_{run.id}")
|
||||
run_dir = os.path.join(run.project.folder_path, "runs", f"run_{run.id}")
|
||||
|
||||
# Detect Books in Run (Series Support)
|
||||
books_data = []
|
||||
if os.path.exists(run_dir):
|
||||
subdirs = sorted([d for d in os.listdir(run_dir) if os.path.isdir(os.path.join(run_dir, d)) and d.startswith("Book_")])
|
||||
if not subdirs: subdirs = ["."] # Handle legacy/flat runs
|
||||
subdirs = utils.get_sorted_book_folders(run_dir)
|
||||
|
||||
for d in subdirs:
|
||||
b_path = os.path.join(run_dir, d)
|
||||
@@ -766,8 +859,8 @@ def view_run(id):
|
||||
|
||||
# Load Tracking Data for Run Details
|
||||
tracking = {"events": [], "characters": {}, "content_warnings": []}
|
||||
# We load tracking from the first book found to populate the general stats
|
||||
book_dir = os.path.join(run_dir, books_data[0]['folder']) if books_data else run_dir
|
||||
# We load tracking from the LAST book found to populate the general stats (most up-to-date)
|
||||
book_dir = os.path.join(run_dir, books_data[-1]['folder']) if books_data else run_dir
|
||||
if os.path.exists(book_dir):
|
||||
t_ev = os.path.join(book_dir, "tracking_events.json")
|
||||
t_ch = os.path.join(book_dir, "tracking_characters.json")
|
||||
@@ -789,11 +882,13 @@ def run_status(id):
|
||||
# Check status from DB or fallback to log file
|
||||
|
||||
log_content = ""
|
||||
last_log = None
|
||||
|
||||
# 1. Try Database Logs (Fastest & Best)
|
||||
logs = LogEntry.query.filter_by(run_id=id).order_by(LogEntry.timestamp).all()
|
||||
if logs:
|
||||
log_content = "\n".join([f"[{l.timestamp.strftime('%H:%M:%S')}] {l.phase:<15} | {l.message}" for l in logs])
|
||||
last_log = logs[-1]
|
||||
|
||||
# 2. Fallback to File (For old runs or if DB logging fails)
|
||||
if not log_content:
|
||||
@@ -804,7 +899,15 @@ def run_status(id):
|
||||
if os.path.exists(temp_log):
|
||||
with open(temp_log, 'r') as f: log_content = f.read()
|
||||
|
||||
return {"status": run.status, "log": log_content, "cost": run.cost}
|
||||
response = {"status": run.status, "log": log_content, "cost": run.cost, "percent": run.progress}
|
||||
|
||||
if last_log:
|
||||
response["progress"] = {
|
||||
"phase": last_log.phase,
|
||||
"message": last_log.message,
|
||||
"timestamp": last_log.timestamp.timestamp()
|
||||
}
|
||||
return response
|
||||
|
||||
@app.route('/project/<int:run_id>/download')
|
||||
@login_required
|
||||
@@ -813,25 +916,223 @@ def download_artifact(run_id):
|
||||
run = db.session.get(Run, run_id) or Run.query.get_or_404(run_id)
|
||||
if run.project.user_id != current_user.id: return "Unauthorized", 403
|
||||
|
||||
run_dir = os.path.join(run.project.folder_path, "runs", "bible", f"run_{run.id}")
|
||||
if not filename: return "Missing filename", 400
|
||||
|
||||
# Security Check: Prevent path traversal
|
||||
# Combined check using normpath to ensure it stays within root and catches basic traversal chars
|
||||
if os.path.isabs(filename) or ".." in os.path.normpath(filename) or ":" in filename:
|
||||
return "Invalid filename", 400
|
||||
|
||||
run_dir = os.path.join(run.project.folder_path, "runs", f"run_{run.id}")
|
||||
|
||||
# If file not found in root, check subfolders (Series Support)
|
||||
if not os.path.exists(os.path.join(run_dir, filename)) and os.path.exists(run_dir):
|
||||
subdirs = sorted([d for d in os.listdir(run_dir) if os.path.isdir(os.path.join(run_dir, d)) and d.startswith("Book_")])
|
||||
if subdirs:
|
||||
# Try the first book folder
|
||||
possible_path = os.path.join(subdirs[0], filename)
|
||||
subdirs = utils.get_sorted_book_folders(run_dir)
|
||||
# Scan all book folders
|
||||
for d in subdirs:
|
||||
possible_path = os.path.join(d, filename)
|
||||
if os.path.exists(os.path.join(run_dir, possible_path)):
|
||||
filename = possible_path
|
||||
break
|
||||
|
||||
return send_from_directory(run_dir, filename, as_attachment=True)
|
||||
|
||||
@app.route('/project/<int:run_id>/read/<string:book_folder>')
@login_required
def read_book(run_id, book_folder):
    """Render a generated book's manuscript as HTML for in-browser reading."""
    run = db.session.get(Run, run_id) or Run.query.get_or_404(run_id)
    if run.project.user_id != current_user.id: return "Unauthorized", 403

    # Path-traversal guard on the user-supplied folder segment.
    traversal = (not book_folder) or "/" in book_folder or "\\" in book_folder or ".." in book_folder
    if traversal: return "Invalid book folder", 400

    run_dir = os.path.join(run.project.folder_path, "runs", f"run_{run.id}")
    book_path = os.path.join(run_dir, book_folder)
    manuscript_path = os.path.join(book_path, "manuscript.json")

    if not os.path.exists(manuscript_path):
        flash("Manuscript not found.")
        return redirect(url_for('view_run', id=run_id))

    manuscript = utils.load_json(manuscript_path)

    # Order chapters; the key function handles Prologue/Epilogue naming.
    manuscript.sort(key=utils.chapter_sort_key)

    # Pre-render each chapter's Markdown body for the template.
    for chapter in manuscript:
        chapter['html_content'] = markdown.markdown(chapter.get('content', ''))

    return render_template('read_book.html', run=run, book_folder=book_folder, manuscript=manuscript)
|
||||
|
||||
@app.route('/project/<int:run_id>/save_chapter', methods=['POST'])
@login_required
def save_chapter(run_id):
    """Persist a manually edited chapter and rebuild export artifacts.

    Form fields:
        book_folder: Book subfolder inside the run directory.
        chapter_num: Chapter identifier (int, or a string like "Epilogue").
        content:     New Markdown body for the chapter.

    Returns plain-text status: 200 on save, 400 on a bad folder,
    409 while the run is active, 500 if the manuscript is missing.
    """
    run = db.session.get(Run, run_id) or Run.query.get_or_404(run_id)
    if run.project.user_id != current_user.id: return "Unauthorized", 403

    if run.status == 'running':
        return "Cannot edit chapter while run is active.", 409

    book_folder = request.form.get('book_folder')
    chap_num_raw = request.form.get('chapter_num')
    # Numeric chapters are stored as ints; named ones ("Epilogue") stay strings.
    # Narrowed from a bare except: only conversion failures fall back.
    try: chap_num = int(chap_num_raw)
    except (TypeError, ValueError): chap_num = chap_num_raw
    new_content = request.form.get('content')

    # Security Check: prevent path traversal via the folder name
    if not book_folder or "/" in book_folder or "\\" in book_folder or ".." in book_folder: return "Invalid book folder", 400

    run_dir = os.path.join(run.project.folder_path, "runs", f"run_{run.id}")
    ms_path = os.path.join(run_dir, book_folder, "manuscript.json")

    if os.path.exists(ms_path):
        ms = utils.load_json(ms_path)
        for ch in ms:
            if ch.get('num') == chap_num:
                ch['content'] = new_content
                break
        with open(ms_path, 'w') as f: json.dump(ms, f, indent=2)

        # Regenerate Artifacts (EPUB/DOCX) to reflect manual edits
        book_path = os.path.join(run_dir, book_folder)
        bp_path = os.path.join(book_path, "final_blueprint.json")
        if os.path.exists(bp_path):
            bp = utils.load_json(bp_path)
            export.compile_files(bp, ms, book_path)

        return "Saved", 200
    return "Error", 500
|
||||
|
||||
@app.route('/project/<int:run_id>/check_consistency/<string:book_folder>')
@login_required
def check_consistency(run_id, book_folder):
    """Run the AI consistency analyzer over a book and render its report.

    Fix: added the ownership check present on every other run route —
    without it any logged-in user could read another user's run data.
    """
    run = db.session.get(Run, run_id) or Run.query.get_or_404(run_id)
    if run.project.user_id != current_user.id: return "Unauthorized", 403

    # Security Check: prevent path traversal via the folder name
    if not book_folder or "/" in book_folder or "\\" in book_folder or ".." in book_folder: return "Invalid book folder", 400

    run_dir = os.path.join(run.project.folder_path, "runs", f"run_{run.id}")
    book_path = os.path.join(run_dir, book_folder)

    bp = utils.load_json(os.path.join(book_path, "final_blueprint.json"))
    ms = utils.load_json(os.path.join(book_path, "manuscript.json"))

    if not bp or not ms:
        return "Data files missing or corrupt.", 404

    # Best-effort model init; narrowed from a bare except.
    try: ai.init_models()
    except Exception: pass

    report = story.analyze_consistency(bp, ms, book_path)
    return render_template('consistency_report.html', report=report, run=run, book_folder=book_folder)
|
||||
|
||||
@app.route('/project/<int:run_id>/sync_book/<string:book_folder>', methods=['POST'])
@login_required
def sync_book_metadata(run_id, book_folder):
    """Re-harvest blueprint metadata from a manually edited manuscript.

    Three-step pipeline, order matters:
      1. harvest_metadata re-reads the (possibly edited) manuscript and
         updates the blueprint (characters, facts).
      2. The character tracking file gains entries for any characters
         that appeared only after the edits.
      3. update_persona_sample refreshes the style sample, since manual
         edits may have shifted the prose voice.

    Only the owning user may sync; blocked while the run is active.
    """
    run = db.session.get(Run, run_id) or Run.query.get_or_404(run_id)
    if run.project.user_id != current_user.id: return "Unauthorized", 403

    # A live generation task owns these files; refuse concurrent writes.
    if run.status == 'running':
        flash("Cannot sync metadata while run is active.")
        return redirect(url_for('read_book', run_id=run_id, book_folder=book_folder))

    # Security Check: book_folder is user-controlled; block path traversal.
    if not book_folder or "/" in book_folder or "\\" in book_folder or ".." in book_folder: return "Invalid book folder", 400

    run_dir = os.path.join(run.project.folder_path, "runs", f"run_{run.id}")
    book_path = os.path.join(run_dir, book_folder)

    ms_path = os.path.join(book_path, "manuscript.json")
    bp_path = os.path.join(book_path, "final_blueprint.json")

    if os.path.exists(ms_path) and os.path.exists(bp_path):
        ms = utils.load_json(ms_path)
        bp = utils.load_json(bp_path)

        # load_json returns a falsy value on unreadable/corrupt JSON.
        if not ms or not bp:
            flash("Data files corrupt.")
            return redirect(url_for('read_book', run_id=run_id, book_folder=book_folder))

        # Best-effort model init; the steps below may still work without it.
        try: ai.init_models()
        except: pass

        # 1. Harvest new characters/info from the EDITED text (Updates BP)
        bp = story.harvest_metadata(bp, book_path, ms)

        # 2. Sync Tracking (Ensure new characters exist in tracking file)
        tracking_path = os.path.join(book_path, "tracking_characters.json")
        if os.path.exists(tracking_path):
            tracking_chars = utils.load_json(tracking_path) or {}
            updated_tracking = False
            for c in bp.get('characters', []):
                # Only add characters the tracking file doesn't know yet.
                if c.get('name') and c['name'] not in tracking_chars:
                    tracking_chars[c['name']] = {"descriptors": [c.get('description', '')], "likes_dislikes": [], "last_worn": "Unknown"}
                    updated_tracking = True
            if updated_tracking:
                with open(tracking_path, 'w') as f: json.dump(tracking_chars, f, indent=2)

        # 3. Update Persona (Style might have changed during manual edits)
        story.update_persona_sample(bp, book_path)

        # Persist the updated blueprint last, after all steps succeeded.
        with open(bp_path, 'w') as f: json.dump(bp, f, indent=2)
        flash("Metadata synced. Future generations will respect your edits.")
    else:
        flash("Files not found.")

    return redirect(url_for('read_book', run_id=run_id, book_folder=book_folder))
|
||||
|
||||
@app.route('/project/<int:run_id>/rewrite_chapter', methods=['POST'])
@login_required
def rewrite_chapter(run_id):
    """Queue a background task that rewrites one chapter per an AI instruction.

    JSON body: {book_folder, chapter_num, instruction}.
    Returns 202 with the Huey task id; the client polls /task_status/<id>.
    """
    run = db.session.get(Run, run_id) or Run.query.get_or_404(run_id)
    if run.project.user_id != current_user.id:
        return {"error": "Unauthorized"}, 403

    if run.status == 'running':
        return {"error": "Cannot rewrite while run is active."}, 409

    data = request.json
    book_folder = data.get('book_folder')
    chap_num = data.get('chapter_num')
    instruction = data.get('instruction')

    if not book_folder or chap_num is None or not instruction:
        return {"error": "Missing parameters"}, 400

    # Security Check: book_folder is user-controlled; block path traversal.
    if "/" in book_folder or "\\" in book_folder or ".." in book_folder: return {"error": "Invalid book folder"}, 400

    # Try to convert to int, but allow strings (e.g. "Epilogue").
    # Narrowed from a bare except: only conversion failures are ignored.
    try: chap_num = int(chap_num)
    except (TypeError, ValueError): pass

    # Start background task
    task = rewrite_chapter_task(run.id, run.project.folder_path, book_folder, chap_num, instruction)

    # Store task ID in session to poll for status
    session['rewrite_task_id'] = task.id

    return {"status": "queued", "task_id": task.id}, 202
|
||||
|
||||
@app.route('/task_status/<string:task_id>')
@login_required
def get_task_status(task_id):
    """Poll a background Huey task; peek=True keeps the result consumable."""
    outcome = huey.result(task_id, peek=True)
    if outcome is None:
        # Huey returns None while the task has not produced a result yet.
        return {"status": "running"}
    return {"status": "completed", "success": outcome}
|
||||
|
||||
@app.route('/logout')
def logout():
    """End the current session and send the user back to the login page."""
    logout_user()
    return redirect(url_for('login'))
|
||||
|
||||
@app.route('/debug/routes')
|
||||
@login_required
|
||||
@admin_required
|
||||
def debug_routes():
|
||||
output = []
|
||||
for rule in app.url_map.iter_rules():
|
||||
@@ -844,6 +1145,7 @@ def debug_routes():
|
||||
|
||||
@app.route('/system/optimize_models', methods=['POST'])
|
||||
@login_required
|
||||
@admin_required
|
||||
def optimize_models():
|
||||
# Force refresh via AI module (safely handles failures)
|
||||
try:
|
||||
@@ -859,12 +1161,6 @@ def optimize_models():
|
||||
|
||||
return redirect(request.referrer or url_for('index'))
|
||||
|
||||
# --- COMPATIBILITY ROUTES (Fix 404s) ---
@app.route('/project/<int:project_id>/run/<int:run_id>')
@login_required
def legacy_run_redirect(project_id, run_id):
    """Redirect old project-scoped run URLs to the canonical /run/<id> view.

    project_id is accepted for URL compatibility but intentionally unused.
    """
    return redirect(url_for('view_run', id=run_id))
|
||||
|
||||
@app.route('/system/status')
|
||||
@login_required
|
||||
def system_status():
|
||||
@@ -880,11 +1176,7 @@ def system_status():
|
||||
models_info = cache_data.get('models', {})
|
||||
except: pass
|
||||
|
||||
# Create a placeholder run object so the template doesn't crash
|
||||
dummy_project = SimpleNamespace(user_id=current_user.id, name="System", folder_path="")
|
||||
dummy_run = SimpleNamespace(id=0, status="System Status", cost=0.0, log_file=None, start_time=datetime.utcnow(), project=dummy_project, duration=lambda: "N/A")
|
||||
|
||||
return render_template('system_status.html', run=dummy_run, models=models_info, cache=cache_data, datetime=datetime)
|
||||
return render_template('system_status.html', models=models_info, cache=cache_data, datetime=datetime)
|
||||
|
||||
@app.route('/personas')
|
||||
@login_required
|
||||
@@ -1105,7 +1397,7 @@ def admin_spend_report():
|
||||
).join(Project, Project.user_id == User.id)\
|
||||
.join(Run, Run.project_id == Project.id)\
|
||||
.filter(Run.start_time >= start_date)\
|
||||
.group_by(User.id).all()
|
||||
.group_by(User.id, User.username).all()
|
||||
|
||||
report = []
|
||||
total_period_spend = 0.0
|
||||
@@ -1182,7 +1474,7 @@ if __name__ == '__main__':
|
||||
c.run()
|
||||
|
||||
# Configuration
|
||||
debug_mode = True
|
||||
debug_mode = os.environ.get("FLASK_DEBUG", "False").lower() == "true"
|
||||
|
||||
# Run worker if: 1. In reloader child process OR 2. Reloader is disabled (debug=False)
|
||||
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" or not debug_mode:
|
||||
|
||||
@@ -30,6 +30,7 @@ class Run(db.Model):
|
||||
end_time = db.Column(db.DateTime, nullable=True)
|
||||
log_file = db.Column(db.String(300), nullable=True)
|
||||
cost = db.Column(db.Float, default=0.0)
|
||||
progress = db.Column(db.Integer, default=0)
|
||||
|
||||
# Relationships
|
||||
logs = db.relationship('LogEntry', backref='run', lazy=True, cascade="all, delete-orphan")
|
||||
|
||||
@@ -7,8 +7,8 @@ from datetime import datetime
|
||||
from huey import SqliteHuey
|
||||
from .web_db import db, Run, User, Project
|
||||
from . import utils
|
||||
import main
|
||||
import config
|
||||
from . import story, ai, marketing, export
|
||||
|
||||
# Configure Huey (Task Queue)
|
||||
huey = SqliteHuey('bookapp_queue', filename=os.path.join(config.DATA_DIR, 'queue.db'))
|
||||
@@ -25,14 +25,23 @@ def db_log_callback(db_path, run_id, phase, msg):
|
||||
time.sleep(0.1)
|
||||
except: break
|
||||
|
||||
def db_progress_callback(db_path, run_id, percent):
    """Persist a run's progress percentage to SQLite (best-effort).

    Retries up to 5 times on OperationalError (database briefly locked by
    the web process), sleeping between attempts. Any other failure aborts
    silently: progress reporting must never crash the generation task.

    Args:
        db_path: Path to the app's SQLite database file.
        run_id:  Primary key of the row in the ``run`` table to update.
        percent: Integer progress value (0-100) to store.
    """
    for _ in range(5):
        try:
            # The connection context manager commits the UPDATE on success.
            with sqlite3.connect(db_path, timeout=5) as conn:
                conn.execute("UPDATE run SET progress = ? WHERE id = ?", (percent, run_id))
            break
        except sqlite3.OperationalError:
            time.sleep(0.1)  # locked by another writer; retry shortly
        except Exception:
            break  # narrowed from bare except so SystemExit/KeyboardInterrupt propagate
|
||||
|
||||
@huey.task()
|
||||
def generate_book_task(run_id, project_path, bible_path, allow_copy=True):
|
||||
def generate_book_task(run_id, project_path, bible_path, allow_copy=True, feedback=None, source_run_id=None, keep_cover=False):
|
||||
"""
|
||||
Background task to run the book generation.
|
||||
"""
|
||||
# 1. Setup Logging
|
||||
log_filename = f"system_log_{run_id}.txt"
|
||||
log_path = os.path.join(project_path, "runs", "bible", f"run_{run_id}", log_filename)
|
||||
|
||||
# Log to project root initially until run folder is created by main
|
||||
initial_log = os.path.join(project_path, log_filename)
|
||||
@@ -41,6 +50,7 @@ def generate_book_task(run_id, project_path, bible_path, allow_copy=True):
|
||||
# Hook up Database Logging
|
||||
db_path = os.path.join(config.DATA_DIR, "bookapp.db")
|
||||
utils.set_log_callback(lambda p, m: db_log_callback(db_path, run_id, p, m))
|
||||
utils.set_progress_callback(lambda p: db_progress_callback(db_path, run_id, p))
|
||||
|
||||
# Set Status to Running
|
||||
try:
|
||||
@@ -51,10 +61,78 @@ def generate_book_task(run_id, project_path, bible_path, allow_copy=True):
|
||||
utils.log("SYSTEM", f"Starting Job #{run_id}")
|
||||
|
||||
try:
|
||||
# 1.1 Handle Feedback / Modification (Re-run logic)
|
||||
if feedback and source_run_id:
|
||||
utils.log("SYSTEM", f"Applying feedback to Run #{source_run_id}: '{feedback}'")
|
||||
|
||||
# Load Source Data (Prefer final_blueprint from source run to capture its state)
|
||||
source_run_dir = os.path.join(project_path, "runs", f"run_{source_run_id}")
|
||||
bible_data = utils.load_json(bible_path)
|
||||
|
||||
# Try to find the blueprint of the book in the source run
|
||||
# (Simplification: If multiple books, we apply feedback to the Bible generally)
|
||||
if bible_data:
|
||||
try:
|
||||
ai.init_models()
|
||||
new_bible = story.refine_bible(bible_data, feedback, project_path)
|
||||
if new_bible:
|
||||
bible_data = new_bible
|
||||
# Save updated Bible (This updates the project state to the new "fork")
|
||||
with open(bible_path, 'w') as f: json.dump(bible_data, f, indent=2)
|
||||
utils.log("SYSTEM", "Bible updated with feedback.")
|
||||
except Exception as e:
|
||||
utils.log("ERROR", f"Failed to refine bible: {e}")
|
||||
|
||||
# 1.2 Keep Cover Art Logic
|
||||
if keep_cover and os.path.exists(source_run_dir):
|
||||
utils.log("SYSTEM", "Attempting to preserve cover art...")
|
||||
|
||||
# We need to predict the new folder names to place the covers
|
||||
# main.py uses: Book_{n}_{safe_title}
|
||||
current_run_dir = os.path.join(project_path, "runs", f"run_{run_id}")
|
||||
if not os.path.exists(current_run_dir): os.makedirs(current_run_dir)
|
||||
|
||||
# Map Source Books -> Target Books by Book Number
|
||||
source_books = {}
|
||||
for d in os.listdir(source_run_dir):
|
||||
if d.startswith("Book_") and os.path.isdir(os.path.join(source_run_dir, d)):
|
||||
parts = d.split('_')
|
||||
if len(parts) > 1 and parts[1].isdigit():
|
||||
source_books[int(parts[1])] = os.path.join(source_run_dir, d)
|
||||
|
||||
if bible_data and 'books' in bible_data:
|
||||
for i, book in enumerate(bible_data['books']):
|
||||
b_num = book.get('book_number', i+1)
|
||||
if b_num in source_books:
|
||||
# Found matching book in source
|
||||
src_folder = source_books[b_num]
|
||||
|
||||
# Predict Target Folder
|
||||
safe_title = utils.sanitize_filename(book.get('title', f"Book_{b_num}"))
|
||||
target_folder = os.path.join(current_run_dir, f"Book_{b_num}_{safe_title}")
|
||||
|
||||
os.makedirs(target_folder, exist_ok=True)
|
||||
|
||||
# Copy Cover
|
||||
src_cover = os.path.join(src_folder, "cover.png")
|
||||
if os.path.exists(src_cover):
|
||||
shutil.copy2(src_cover, os.path.join(target_folder, "cover.png"))
|
||||
# Also copy cover_art.png to prevent regeneration if logic allows
|
||||
if os.path.exists(os.path.join(src_folder, "cover_art.png")):
|
||||
shutil.copy2(os.path.join(src_folder, "cover_art.png"), os.path.join(target_folder, "cover_art.png"))
|
||||
utils.log("SYSTEM", f" -> Copied cover for Book {b_num}")
|
||||
|
||||
# 1.5 Copy Forward Logic (Series Optimization)
|
||||
# Check for previous runs and copy completed books to skip re-generation
|
||||
runs_dir = os.path.join(project_path, "runs", "bible")
|
||||
if allow_copy and os.path.exists(runs_dir):
|
||||
is_series = False
|
||||
if os.path.exists(bible_path):
|
||||
bible_data = utils.load_json(bible_path)
|
||||
if bible_data:
|
||||
is_series = bible_data.get('project_metadata', {}).get('is_series', False)
|
||||
|
||||
runs_dir = os.path.join(project_path, "runs")
|
||||
|
||||
# Only copy if explicitly requested AND it's a series (Standalone books get fresh re-rolls)
|
||||
if allow_copy and is_series and os.path.exists(runs_dir):
|
||||
# Get all run folders except current
|
||||
all_runs = [d for d in os.listdir(runs_dir) if d.startswith("run_") and d != f"run_{run_id}"]
|
||||
# Sort by ID (ascending)
|
||||
@@ -63,7 +141,7 @@ def generate_book_task(run_id, project_path, bible_path, allow_copy=True):
|
||||
if all_runs:
|
||||
latest_run_dir = os.path.join(runs_dir, all_runs[-1])
|
||||
current_run_dir = os.path.join(runs_dir, f"run_{run_id}")
|
||||
if not os.path.exists(current_run_dir): os.makedirs(current_run_dir)
|
||||
os.makedirs(current_run_dir, exist_ok=True)
|
||||
|
||||
utils.log("SYSTEM", f"Checking previous run ({all_runs[-1]}) for completed books...")
|
||||
for item in os.listdir(latest_run_dir):
|
||||
@@ -73,16 +151,18 @@ def generate_book_task(run_id, project_path, bible_path, allow_copy=True):
|
||||
src = os.path.join(latest_run_dir, item)
|
||||
dst = os.path.join(current_run_dir, item)
|
||||
try:
|
||||
shutil.copytree(src, dst)
|
||||
shutil.copytree(src, dst, dirs_exist_ok=True)
|
||||
utils.log("SYSTEM", f" -> Copied {item} (Skipping generation).")
|
||||
except Exception as e:
|
||||
utils.log("SYSTEM", f" -> Failed to copy {item}: {e}")
|
||||
|
||||
# 2. Run Generation
|
||||
# We call the existing entry point
|
||||
main.run_generation(bible_path, specific_run_id=run_id)
|
||||
from main import run_generation
|
||||
run_generation(bible_path, specific_run_id=run_id)
|
||||
|
||||
utils.log("SYSTEM", "Job Complete.")
|
||||
utils.update_progress(100)
|
||||
status = "completed"
|
||||
|
||||
except Exception as e:
|
||||
@@ -91,7 +171,7 @@ def generate_book_task(run_id, project_path, bible_path, allow_copy=True):
|
||||
|
||||
# 3. Calculate Cost & Cleanup
|
||||
# Use the specific run folder we know main.py used
|
||||
run_dir = os.path.join(project_path, "runs", "bible", f"run_{run_id}")
|
||||
run_dir = os.path.join(project_path, "runs", f"run_{run_id}")
|
||||
|
||||
total_cost = 0.0
|
||||
final_log_path = initial_log
|
||||
@@ -120,7 +200,7 @@ def generate_book_task(run_id, project_path, bible_path, allow_copy=True):
|
||||
# 4. Update Database with Final Status
|
||||
try:
|
||||
with sqlite3.connect(db_path, timeout=10) as conn:
|
||||
conn.execute("UPDATE run SET status = ?, cost = ?, end_time = ?, log_file = ? WHERE id = ?",
|
||||
conn.execute("UPDATE run SET status = ?, cost = ?, end_time = ?, log_file = ?, progress = 100 WHERE id = ?",
|
||||
(status, total_cost, datetime.utcnow(), final_log_path, run_id))
|
||||
except Exception as e:
|
||||
print(f"Failed to update run status in DB: {e}")
|
||||
@@ -132,11 +212,20 @@ def regenerate_artifacts_task(run_id, project_path, feedback=None):
|
||||
# Hook up Database Logging & Status
|
||||
db_path = os.path.join(config.DATA_DIR, "bookapp.db")
|
||||
|
||||
# Truncate log file to ensure clean slate
|
||||
log_filename = f"system_log_{run_id}.txt"
|
||||
initial_log = os.path.join(project_path, log_filename)
|
||||
with open(initial_log, 'w', encoding='utf-8') as f: f.write("")
|
||||
utils.set_log_file(initial_log)
|
||||
# Determine log file path: Prefer the existing web_console.log in the run dir
|
||||
run_dir = os.path.join(project_path, "runs", f"run_{run_id}")
|
||||
log_file = os.path.join(run_dir, "web_console.log")
|
||||
|
||||
# Fallback to project root temp file if run dir doesn't exist (unlikely for regeneration)
|
||||
if not os.path.exists(run_dir):
|
||||
log_file = os.path.join(project_path, f"system_log_{run_id}.txt")
|
||||
|
||||
try:
|
||||
with open(log_file, 'a', encoding='utf-8') as f:
|
||||
f.write(f"\n\n[{datetime.utcnow().strftime('%H:%M:%S')}] --- REGENERATION STARTED ---\n")
|
||||
except: pass
|
||||
|
||||
utils.set_log_file(log_file)
|
||||
|
||||
utils.set_log_callback(lambda p, m: db_log_callback(db_path, run_id, p, m))
|
||||
try:
|
||||
@@ -147,18 +236,21 @@ def regenerate_artifacts_task(run_id, project_path, feedback=None):
|
||||
utils.log("SYSTEM", "Starting Artifact Regeneration...")
|
||||
|
||||
# 1. Setup Paths
|
||||
run_dir = os.path.join(project_path, "runs", "bible", f"run_{run_id}")
|
||||
|
||||
# Detect Book Subfolder
|
||||
book_dir = run_dir
|
||||
if os.path.exists(run_dir):
|
||||
subdirs = sorted([d for d in os.listdir(run_dir) if os.path.isdir(os.path.join(run_dir, d)) and d.startswith("Book_")])
|
||||
subdirs = utils.get_sorted_book_folders(run_dir)
|
||||
if subdirs: book_dir = os.path.join(run_dir, subdirs[0])
|
||||
|
||||
bible_path = os.path.join(project_path, "bible.json")
|
||||
|
||||
if not os.path.exists(run_dir) or not os.path.exists(bible_path):
|
||||
utils.log("ERROR", "Run directory or Bible not found.")
|
||||
try:
|
||||
with sqlite3.connect(db_path) as conn:
|
||||
conn.execute("UPDATE run SET status = 'failed' WHERE id = ?", (run_id,))
|
||||
except: pass
|
||||
return
|
||||
|
||||
# 2. Load Data
|
||||
@@ -168,6 +260,10 @@ def regenerate_artifacts_task(run_id, project_path, feedback=None):
|
||||
|
||||
if not os.path.exists(final_bp_path) or not os.path.exists(ms_path):
|
||||
utils.log("ERROR", f"Blueprint or Manuscript not found in {book_dir}")
|
||||
try:
|
||||
with sqlite3.connect(db_path) as conn:
|
||||
conn.execute("UPDATE run SET status = 'failed' WHERE id = ?", (run_id,))
|
||||
except: pass
|
||||
return
|
||||
|
||||
bp = utils.load_json(final_bp_path)
|
||||
@@ -196,15 +292,15 @@ def regenerate_artifacts_task(run_id, project_path, feedback=None):
|
||||
|
||||
# 4. Regenerate
|
||||
try:
|
||||
main.ai.init_models()
|
||||
ai.init_models()
|
||||
|
||||
tracking = None
|
||||
events_path = os.path.join(book_dir, "tracking_events.json")
|
||||
if os.path.exists(events_path):
|
||||
tracking = {"events": utils.load_json(events_path), "characters": utils.load_json(os.path.join(book_dir, "tracking_characters.json"))}
|
||||
|
||||
main.marketing.generate_cover(bp, book_dir, tracking, feedback=feedback)
|
||||
main.export.compile_files(bp, ms, book_dir)
|
||||
marketing.generate_cover(bp, book_dir, tracking, feedback=feedback)
|
||||
export.compile_files(bp, ms, book_dir)
|
||||
|
||||
utils.log("SYSTEM", "Regeneration Complete.")
|
||||
final_status = 'completed'
|
||||
@@ -215,4 +311,60 @@ def regenerate_artifacts_task(run_id, project_path, feedback=None):
|
||||
try:
|
||||
with sqlite3.connect(db_path) as conn:
|
||||
conn.execute("UPDATE run SET status = ? WHERE id = ?", (final_status, run_id))
|
||||
except: pass
|
||||
except: pass
|
||||
|
||||
@huey.task()
|
||||
def rewrite_chapter_task(run_id, project_path, book_folder, chap_num, instruction):
|
||||
"""
|
||||
Background task to rewrite a single chapter and propagate changes.
|
||||
"""
|
||||
try:
|
||||
run_dir = os.path.join(project_path, "runs", f"run_{run_id}")
|
||||
|
||||
# --- Setup Logging for Rewrite ---
|
||||
# Append to the existing run log so it appears in the UI
|
||||
log_file = os.path.join(run_dir, "web_console.log")
|
||||
if not os.path.exists(log_file):
|
||||
log_file = os.path.join(project_path, f"system_log_{run_id}.txt")
|
||||
|
||||
utils.set_log_file(log_file)
|
||||
db_path = os.path.join(config.DATA_DIR, "bookapp.db")
|
||||
utils.set_log_callback(lambda p, m: db_log_callback(db_path, run_id, p, m))
|
||||
# ---------------------------------
|
||||
|
||||
book_path = os.path.join(run_dir, book_folder)
|
||||
|
||||
ms_path = os.path.join(book_path, "manuscript.json")
|
||||
bp_path = os.path.join(book_path, "final_blueprint.json")
|
||||
|
||||
if not (os.path.exists(ms_path) and os.path.exists(bp_path)):
|
||||
utils.log("ERROR", f"Rewrite failed: files not found for run {run_id}/{book_folder}")
|
||||
return False
|
||||
|
||||
ms = utils.load_json(ms_path)
|
||||
bp = utils.load_json(bp_path)
|
||||
|
||||
ai.init_models()
|
||||
|
||||
new_text = story.rewrite_chapter_content(bp, ms, chap_num, instruction, book_path)
|
||||
|
||||
if new_text:
|
||||
for ch in ms:
|
||||
if ch.get('num') == chap_num:
|
||||
ch['content'] = new_text
|
||||
break
|
||||
|
||||
# Save the primary rewrite immediately
|
||||
with open(ms_path, 'w') as f: json.dump(ms, f, indent=2)
|
||||
|
||||
updated_ms = story.check_and_propagate(bp, ms, chap_num, book_path)
|
||||
if updated_ms:
|
||||
ms = updated_ms
|
||||
|
||||
with open(ms_path, 'w') as f: json.dump(ms, f, indent=2)
|
||||
export.compile_files(bp, ms, book_path)
|
||||
return True
|
||||
return False
|
||||
except Exception as e:
|
||||
utils.log("ERROR", f"Rewrite task exception for run {run_id}/{book_folder}: {e}")
|
||||
return False
|
||||
Reference in New Issue
Block a user