1061 lines
47 KiB
Python
1061 lines
47 KiB
Python
import json
|
|
import os
|
|
import random
|
|
import time
|
|
import config
|
|
from modules import ai
|
|
from . import utils
|
|
|
|
def get_style_guidelines():
    """Load the banned-words style guidelines, seeding defaults on first run.

    Returns:
        dict with 'ai_isms' (clichés to avoid) and 'filter_words'
        (distancing verbs). User overrides in DATA_DIR/style_guidelines.json
        replace the defaults key-by-key; on any I/O or parse problem the
        built-in defaults are returned unchanged (best-effort by design).
    """
    defaults = {
        "ai_isms": [
            'testament to', 'tapestry', 'shiver down spine', 'unspoken agreement',
            'palpable tension', 'a sense of', 'suddenly', 'in that moment',
            'symphony of', 'dance of', 'azure', 'cerulean'
        ],
        "filter_words": [
            'felt', 'saw', 'heard', 'realized', 'decided', 'noticed', 'knew', 'thought'
        ]
    }
    path = os.path.join(config.DATA_DIR, "style_guidelines.json")
    if os.path.exists(path):
        try:
            user_data = utils.load_json(path)
            if user_data:
                # Only adopt the keys the user actually provided.
                if 'ai_isms' in user_data: defaults['ai_isms'] = user_data['ai_isms']
                if 'filter_words' in user_data: defaults['filter_words'] = user_data['filter_words']
        except Exception as e:
            # Was a bare `except: pass`; keep best-effort semantics but leave a trace.
            utils.log("SYSTEM", f"Could not load style guidelines: {e}")
    else:
        try:
            # Seed the file so users can discover and edit the lists.
            with open(path, 'w') as f:
                json.dump(defaults, f, indent=2)
        except Exception as e:
            utils.log("SYSTEM", f"Could not seed style guidelines: {e}")
    return defaults
|
|
|
|
def refresh_style_guidelines(model, folder=None):
    """Ask the AI to curate the banned-words lists and persist the result.

    Args:
        model: generative model exposing `generate_content` (same API as ai.model_logic).
        folder: optional project folder for usage accounting; logging is skipped when None.

    Returns:
        The newly persisted guidelines dict, or the current guidelines when the
        model response is invalid or any step fails.
    """
    utils.log("SYSTEM", "Refreshing Style Guidelines via AI...")
    current = get_style_guidelines()

    prompt = f"""
Act as a Literary Editor. Update our 'Banned Words' lists for AI writing.

CURRENT AI-ISMS (Cliches to avoid):
{json.dumps(current.get('ai_isms', []))}

CURRENT FILTER WORDS (Distancing language):
{json.dumps(current.get('filter_words', []))}

TASK:
1. Review the lists. Remove any that are too common/safe (false positives).
2. Add new common AI tropes (e.g. 'neon-lit', 'bustling', 'a sense of', 'mined', 'delved').
3. Ensure the list is robust but not paralyzing.

RETURN JSON: {{ "ai_isms": [strings], "filter_words": [strings] }}
"""
    try:
        response = model.generate_content(prompt)
        if folder: utils.log_usage(folder, "logic-pro", response.usage_metadata)
        # clean_json strips markdown fences etc. before parsing — presumably; verify in utils.
        new_data = json.loads(utils.clean_json(response.text))

        # Validate: only overwrite the file when BOTH expected keys came back.
        # If validation fails we fall through and return the current lists.
        if 'ai_isms' in new_data and 'filter_words' in new_data:
            path = os.path.join(config.DATA_DIR, "style_guidelines.json")
            with open(path, 'w') as f: json.dump(new_data, f, indent=2)
            utils.log("SYSTEM", "Style Guidelines updated.")
            return new_data
    except Exception as e:
        utils.log("SYSTEM", f"Failed to refresh guidelines: {e}")
    return current
|
|
|
|
def merge_selected_changes(original, draft, selected_keys):
    """Merge user-selected fields from a draft bible into the original bible.

    Keys address fields with dotted paths:
      - "meta.<field>"          -> project_metadata field ("meta.tone" targets style.tone)
      - "char.<idx>"            -> characters list entry (appended when new)
      - "book.<num>"            -> a book's title + manual_instruction
      - "book.<num>.beat.<idx>" -> a single plot beat within a book

    Mutates and returns `original`. `selected_keys` is no longer sorted in
    place (a sorted copy is used), so the caller's list is left untouched.
    """
    def sort_key(k):
        # Tag each path segment so ints and strings never compare directly:
        # the original `int(p) if p.isdigit() else p` produced mixed-type
        # lists, which raise TypeError under Python 3 when compared.
        return [(0, int(p), '') if p.isdigit() else (1, 0, p) for p in k.split('.')]

    # Deterministic application order without mutating the caller's list.
    for key in sorted(selected_keys, key=sort_key):
        parts = key.split('.')

        # Metadata: meta.title
        if parts[0] == 'meta' and len(parts) == 2:
            field = parts[1]
            if field == 'tone':
                # 'tone' lives nested under style, unlike the other metadata fields.
                original['project_metadata']['style']['tone'] = draft['project_metadata']['style']['tone']
            elif field in original['project_metadata']:
                original['project_metadata'][field] = draft['project_metadata'][field]

        # Characters: char.0
        elif parts[0] == 'char' and len(parts) >= 2:
            idx = int(parts[1])
            if idx < len(draft['characters']):
                if idx < len(original['characters']):
                    original['characters'][idx] = draft['characters'][idx]
                else:
                    # Character is new in the draft: append rather than index.
                    original['characters'].append(draft['characters'][idx])

        # Books: book.1 or book.1.beat.2
        elif parts[0] == 'book' and len(parts) >= 2:
            book_num = int(parts[1])
            orig_book = next((b for b in original['books'] if b['book_number'] == book_num), None)
            draft_book = next((b for b in draft['books'] if b['book_number'] == book_num), None)

            if draft_book:
                if not orig_book:
                    # Whole book is new: adopt it wholesale and keep the list ordered.
                    original['books'].append(draft_book)
                    original['books'].sort(key=lambda x: x.get('book_number', 999))
                    continue

                if len(parts) == 2:
                    orig_book['title'] = draft_book['title']
                    orig_book['manual_instruction'] = draft_book['manual_instruction']

                elif len(parts) == 4 and parts[2] == 'beat':
                    beat_idx = int(parts[3])
                    if beat_idx < len(draft_book['plot_beats']):
                        # Pad with empty beats so the target index exists.
                        while len(orig_book['plot_beats']) <= beat_idx:
                            orig_book['plot_beats'].append("")
                        orig_book['plot_beats'][beat_idx] = draft_book['plot_beats'][beat_idx]

    return original
|
|
|
|
def filter_characters(chars):
    """Drop placeholder character entries the AI sometimes emits.

    An entry is kept only when it has a truthy 'name' whose lowercased,
    stripped form is not a known placeholder label.
    """
    placeholders = {
        'name', 'character name', 'role', 'protagonist', 'antagonist',
        'love interest', 'unknown', 'tbd', 'todo', 'hero', 'villain',
        'main character', 'side character',
    }
    kept = []
    for entry in chars:
        raw_name = entry.get('name')
        if not raw_name:
            continue
        if raw_name.lower().strip() in placeholders:
            continue
        kept.append(entry)
    return kept
|
|
|
|
def enrich(bp, folder, context=""):
    """Flesh out a minimal book blueprint into a full Book Bible via AI.

    Args:
        bp: blueprint dict; may be nearly empty. Mutated in place and returned.
        folder: project folder used for usage logging.
        context: prior-book summary when generating a sequel; empty otherwise.

    Only fills fields the user left missing — user-provided values win.
    Returns `bp` (possibly unchanged) even on failure.
    """
    utils.log("ENRICHER", "Fleshing out details from description...")

    # If book_metadata is missing, create empty dict so AI can fill it
    if 'book_metadata' not in bp: bp['book_metadata'] = {}
    if 'characters' not in bp: bp['characters'] = []
    if 'plot_beats' not in bp: bp['plot_beats'] = []

    prompt = f"""
You are a Creative Director.
The user has provided a minimal description. You must build a full Book Bible.

USER DESCRIPTION: "{bp.get('manual_instruction', 'A generic story')}"
CONTEXT (Sequel): {context}

TASK:
1. Generate a catchy Title.
2. Define the Genre and Tone.
3. Determine the Time Period (e.g. "Modern", "1920s", "Sci-Fi Future").
4. Define Formatting Rules for text messages, thoughts, and chapter headers.
5. Create Protagonist and Antagonist/Love Interest.
- IF SEQUEL: Decide if we continue with previous protagonists or shift to side characters based on USER DESCRIPTION.
- IF NEW CHARACTERS: Create them.
- IF RETURNING: Reuse details from CONTEXT.
6. Outline 5-7 core Plot Beats.
7. Define a 'structure_prompt' describing the narrative arc (e.g. "Hero's Journey", "3-Act Structure", "Detective Procedural").

RETURN JSON in this EXACT format:
{{
"book_metadata": {{ "title": "Book Title", "genre": "Genre", "content_warnings": ["Violence", "Major Character Death"], "structure_prompt": "...", "style": {{ "tone": "Tone", "time_period": "Modern", "formatting_rules": ["Chapter Headers: Number + Title", "Text Messages: Italic", "Thoughts: Italic"] }} }},
"characters": [ {{ "name": "John Doe", "role": "Protagonist", "description": "Description", "key_events": ["Planned injury in Act 2"] }} ],
"plot_beats": [ "Beat 1", "Beat 2", "..." ]
}}
"""
    try:
        # Merge AI response with existing data (don't overwrite if user provided specific keys)
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        response_text = response.text
        cleaned_json = utils.clean_json(response_text)
        ai_data = json.loads(cleaned_json)

        # Smart Merge: Only fill missing fields
        # NOTE(review): this re-check is redundant — 'book_metadata' was
        # already ensured at the top of the function.
        if 'book_metadata' not in bp:
            bp['book_metadata'] = {}

        if 'title' not in bp['book_metadata']:
            bp['book_metadata']['title'] = ai_data.get('book_metadata', {}).get('title')
        if 'structure_prompt' not in bp['book_metadata']:
            bp['book_metadata']['structure_prompt'] = ai_data.get('book_metadata', {}).get('structure_prompt')
        if 'content_warnings' not in bp['book_metadata']:
            bp['book_metadata']['content_warnings'] = ai_data.get('book_metadata', {}).get('content_warnings', [])

        # Merge Style (Flexible)
        if 'style' not in bp['book_metadata']:
            bp['book_metadata']['style'] = {}

        # Handle AI returning legacy keys or new style key
        source_style = ai_data.get('book_metadata', {}).get('style', {})

        # Key-by-key: AI style values never overwrite user-provided ones.
        for k, v in source_style.items():
            if k not in bp['book_metadata']['style']:
                bp['book_metadata']['style'][k] = v

        if 'characters' not in bp or not bp['characters']:
            bp['characters'] = ai_data.get('characters', [])

        # Filter out default names
        if 'characters' in bp:
            bp['characters'] = filter_characters(bp['characters'])

        if 'plot_beats' not in bp or not bp['plot_beats']:
            bp['plot_beats'] = ai_data.get('plot_beats', [])

        return bp
    except Exception as e:
        # Best-effort: blueprint is returned as-is so the pipeline can continue.
        utils.log("ENRICHER", f"Enrichment failed: {e}")
        return bp
|
|
|
|
def plan_structure(bp, folder):
    """Generate the high-level event structure for the book.

    Uses the blueprint's own 'structure_prompt' when present; otherwise
    picks a default structure matching the length label (Novel, Epic, ...).

    Returns:
        list of event dicts ({'description', 'purpose'}), or [] on failure.
    """
    utils.log("ARCHITECT", "Creating structure...")

    structure_type = bp.get('book_metadata', {}).get('structure_prompt')

    if not structure_type:
        # Fall back to a structure template keyed by the book's length label.
        label = bp.get('length_settings', {}).get('label', 'Novel')
        structures = {
            "Chapter Book": "Create a simple episodic structure with clear chapter hooks.",
            "Young Adult": "Create a character-driven arc with high emotional stakes and a clear 'Coming of Age' theme.",
            "Flash Fiction": "Create a single, impactful scene structure with a twist.",
            "Short Story": "Create a concise narrative arc (Inciting Incident -> Rising Action -> Climax -> Resolution).",
            "Novella": "Create a standard 3-Act Structure.",
            "Novel": "Create a detailed 3-Act Structure with A and B plots.",
            "Epic": "Create a complex, multi-arc structure (Hero's Journey) with extensive world-building events."
        }
        structure_type = structures.get(label, "Create a 3-Act Structure.")

    # Was: `beats_context = []` followed by `if not beats_context:` — the
    # condition was always true, so the assignment below is the whole intent.
    beats_context = bp.get('plot_beats', [])

    prompt = f"{structure_type}\nTITLE: {bp['book_metadata']['title']}\nBEATS: {json.dumps(beats_context)}\nReturn JSON: {{'events': [{{'description':'...', 'purpose':'...'}}]}}"
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        return json.loads(utils.clean_json(response.text))['events']
    except Exception as e:
        # Was a silent bare `except: return []`; log so failures are visible.
        utils.log("ARCHITECT", f"Structure planning failed: {e}")
        return []
|
|
|
|
def expand(events, pass_num, target_chapters, bp, folder):
    """Run one expansion pass over the event outline to smooth pacing.

    Args:
        events: current list of event dicts.
        pass_num: 1-based pass index (for logging only).
        target_chapters: intended chapter count used to size the outline.
        bp: blueprint (supplies the original plot beats as an anchor).
        folder: project folder for usage logging.

    Returns:
        The expanded event list, or the input `events` unchanged on error.
    """
    utils.log("ARCHITECT", f"Expansion pass {pass_num} | Current Beats: {len(events)} | Target Chaps: {target_chapters}")

    # Was: `beats_context = []` followed by `if not beats_context:` — the
    # condition was always true, so this assignment is the whole intent.
    beats_context = bp.get('plot_beats', [])

    prompt = f"""
You are a Story Architect.
Goal: Flesh out this outline for a {target_chapters}-chapter book.
Current Status: {len(events)} beats.

ORIGINAL OUTLINE:
{json.dumps(beats_context)}

INSTRUCTIONS:
1. Look for jumps in time or logic.
2. Insert new intermediate events to smooth the pacing.
3. Deepen subplots while staying true to the ORIGINAL OUTLINE.
4. Do NOT remove or drastically alter the original outline points; expand AROUND them.

CURRENT EVENTS:
{json.dumps(events)}

Return JSON: {{'events': [ ...updated full list... ]}}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        new_events = json.loads(utils.clean_json(response.text))['events']

        # Heuristic progress report: count growth, then text growth, else no-op.
        if len(new_events) > len(events):
            utils.log("ARCHITECT", f" -> Added {len(new_events) - len(events)} new beats.")
        elif len(str(new_events)) > len(str(events)) + 20:
            utils.log("ARCHITECT", f" -> Fleshed out descriptions (Text grew by {len(str(new_events)) - len(str(events))} chars).")
        else:
            utils.log("ARCHITECT", " -> No significant changes.")
        return new_events
    except Exception as e:
        # A failed pass is non-fatal: keep the outline we already have.
        utils.log("ARCHITECT", f" -> Pass skipped due to error: {e}")
        return events
|
|
|
|
def create_chapter_plan(events, bp, folder):
    """Group the event outline into a concrete chapter plan.

    Args:
        events: expanded list of event dicts.
        bp: blueprint; must contain length_settings with 'chapters' and
            optionally 'words', 'include_prologue', 'include_epilogue'.
        folder: project folder for usage logging.

    Returns:
        list of chapter dicts (chapter_number, title, pov_character, pacing,
        estimated_words, beats), or [] on any failure.
    """
    utils.log("ARCHITECT", "Finalizing Chapters...")
    target = bp['length_settings']['chapters']
    words = bp['length_settings'].get('words', 'Flexible')

    include_prologue = bp.get('length_settings', {}).get('include_prologue', False)
    include_epilogue = bp.get('length_settings', {}).get('include_epilogue', False)

    structure_instructions = ""
    if include_prologue: structure_instructions += "- Include a 'Prologue' (chapter_number: 0) to set the scene.\n"
    if include_epilogue: structure_instructions += "- Include an 'Epilogue' (chapter_number: 'Epilogue') to wrap up.\n"

    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})
    pov_chars = style.get('pov_characters', [])
    pov_instruction = ""
    if pov_chars:
        pov_instruction = f"- Assign a 'pov_character' for each chapter from this list: {json.dumps(pov_chars)}."

    prompt = f"""
Group events into Chapters.
TARGET CHAPTERS: {target} (Approximate. Feel free to adjust +/- 20% for better pacing).
TARGET WORDS: {words} (Total for the book).

INSTRUCTIONS:
- Vary chapter pacing. Options: 'Very Fast', 'Fast', 'Standard', 'Slow', 'Very Slow'.
- Assign an estimated word count to each chapter based on its pacing and content.
{structure_instructions}
{pov_instruction}

EVENTS: {json.dumps(events)}
Return JSON: [{{'chapter_number':1, 'title':'...', 'pov_character': 'Name', 'pacing': 'Standard', 'estimated_words': 2000, 'beats':[...]}}]
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        plan = json.loads(utils.clean_json(response.text))

        # Parse the word target: 'words' may be an int, "80k", "70,000-90000",
        # "50000+", etc. Normalization strips commas/spaces, maps 'k' -> '000'.
        target_str = str(words).lower().replace(',', '').replace('k', '000').replace('+', '').replace(' ', '')
        target_val = 0
        if '-' in target_str:
            try:
                # Range like "70000-90000": take the midpoint.
                parts = target_str.split('-')
                target_val = int((int(parts[0]) + int(parts[1])) / 2)
            except: pass
        else:
            try: target_val = int(target_str)
            except: pass

        # target_val stays 0 for non-numeric targets (e.g. 'Flexible'),
        # which skips the rescaling below entirely.
        if target_val > 0:
            # Jitter the target +/-10% so runs don't all land on round numbers.
            variance = random.uniform(0.90, 1.10)
            target_val = int(target_val * variance)
            utils.log("ARCHITECT", f"Target adjusted with variance ({variance:.2f}x): {target_val} words.")

            # Rescale per-chapter estimates so they sum to the jittered target.
            # (This must only run when a numeric target exists, otherwise the
            # factor would zero every estimate.)
            current_sum = sum(int(c.get('estimated_words', 0)) for c in plan)
            if current_sum > 0:
                factor = target_val / current_sum
                utils.log("ARCHITECT", f"Adjusting chapter lengths by {factor:.2f}x to match target.")
                for c in plan:
                    c['estimated_words'] = int(c.get('estimated_words', 0) * factor)

        return plan
    except Exception as e:
        utils.log("ARCHITECT", f"Failed to create chapter plan: {e}")
        return []
|
|
|
|
def update_tracking(folder, chapter_num, chapter_text, current_tracking):
    """Update the continuity-tracking state (events, character visuals,
    content warnings) from a freshly written chapter.

    Args:
        folder: project folder for usage logging.
        chapter_num: chapter identifier (for logging only).
        chapter_text: the chapter prose; truncated to 500k chars for the model.
        current_tracking: existing tracking dict (events list, characters
            dict, content_warnings list).

    Returns:
        The model's updated tracking dict, or `current_tracking` unchanged
        on any failure.
    """
    utils.log("TRACKER", f"Updating world state & character visuals for Ch {chapter_num}...")

    prompt = f"""
Analyze this chapter text to update the Story Bible.

CURRENT TRACKING DATA:
{json.dumps(current_tracking)}

NEW CHAPTER TEXT:
{chapter_text[:500000]}

TASK:
1. EVENTS: Append 1-3 concise bullet points summarizing key plot events in this chapter to the 'events' list.
2. CHARACTERS: Update entries for any characters appearing in the scene.
- "descriptors": List of strings. Add PERMANENT physical traits (height, hair, eyes), specific items (jewelry, weapons). Avoid duplicates.
- "likes_dislikes": List of strings. Add specific preferences, likes, or dislikes mentioned (e.g., "Hates coffee", "Loves jazz").
- "last_worn": String. Update if specific clothing is described. IMPORTANT: If a significant time jump occurred (e.g. next day) and no new clothing is described, reset this to "Unknown".
- "major_events": List of strings. Log significant life-altering events occurring in THIS chapter (e.g. "Lost an arm", "Married", "Betrayed by X").
3. CONTENT_WARNINGS: List of strings. Identify specific triggers present in this chapter (e.g. "Graphic Violence", "Sexual Assault", "Torture", "Self-Harm"). Append to existing list.

RETURN JSON with the SAME structure as CURRENT TRACKING DATA (events list, characters dict, content_warnings list).
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        # NOTE(review): the model's reply replaces the tracking dict wholesale;
        # no local validation of the returned structure is performed here.
        new_data = json.loads(utils.clean_json(response.text))
        return new_data
    except Exception as e:
        utils.log("TRACKER", f"Failed to update tracking: {e}")
        return current_tracking
|
|
|
|
def evaluate_chapter_quality(text, chapter_title, genre, model, folder):
    """Score a chapter draft 1-10 and produce an actionable critique.

    Args:
        text: chapter draft; truncated to 30k chars for the model.
        chapter_title: title shown to the editor persona.
        genre: genre used to calibrate trope tolerance.
        model: generative model exposing `generate_content`.
        folder: project folder for usage logging.

    Returns:
        (score, critique_text) tuple. On failure returns
        (0, "Evaluation error: ...") — callers (write_chapter) check for the
        literal "Evaluation error" prefix, so do not change that string.
    """
    guidelines = get_style_guidelines()
    ai_isms = "', '".join(guidelines['ai_isms'])
    # Show the first few filter words as usage examples ('He felt', ...).
    fw_examples = ", ".join([f"'He {w}'" for w in guidelines['filter_words'][:5]])

    prompt = f"""
Act as a World-Class Literary Editor (e.g., Maxwell Perkins). Analyze this chapter draft with extreme scrutiny.
CHAPTER TITLE: {chapter_title}
GENRE: {genre}

STRICT PROHIBITIONS (Automatic deduction):
- "AI-isms": '{ai_isms}'. (Evaluate in context of {genre}. Allow genre-appropriate tropes, but penalize robotic clichés).
- Filter Words: {fw_examples}, etc. (Show the sensation/action, don't state the internal process).
- Stilted Dialogue: Characters speaking in perfect paragraphs without interruptions, slang, or subtext.
- White Room Syndrome: Dialogue occurring in a void without interaction with the setting/props.
- "As You Know, Bob": Characters explaining things to each other that they both already know.
- Summary Mode: Summarizing conversation or action instead of dramatizing it (e.g. "They discussed the plan" vs writing the dialogue).

CRITERIA:
1. ENGAGEMENT & TENSION: Does the story grip the reader from the first line? Is there conflict or tension in every scene?
2. SCENE EXECUTION: Is the middle of the chapter fully fleshed out? Does it avoid "sagging" or summarizing key moments?
3. VOICE & TONE: Is the narrative voice distinct? Does it match the genre?
4. SENSORY IMMERSION: Does the text engage all five senses (smell, sound, touch, etc.)?
5. SHOW, DON'T TELL: Are emotions shown through physical reactions and subtext?
6. CHARACTER AGENCY: Do characters drive the plot through active choices?
7. PACING: Does the chapter feel rushed? Does the ending land with impact, or does it cut off too abruptly?

Rate on a scale of 1-10. (Be harsh. 10 is Pulitzer level. 6 is average. Anything below 8 needs work).

Return JSON: {{
'score': int,
'critique': 'Detailed analysis of flaws, citing specific examples from the text.',
'actionable_feedback': 'List of 3-5 specific, ruthless instructions for the rewrite (e.g. "Expand the middle dialogue", "Add sensory details about the rain", "Dramatize the argument instead of summarizing it").'
}}
"""
    try:
        # Prompt and chapter text are sent as two parts.
        response = model.generate_content([prompt, text[:30000]])
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        data = json.loads(utils.clean_json(response.text))

        critique_text = data.get('critique', 'No critique provided.')
        # Fold the rewrite instructions into the critique so the refinement
        # prompt receives both in one string.
        if data.get('actionable_feedback'):
            critique_text += "\n\nREQUIRED FIXES:\n" + str(data.get('actionable_feedback'))

        return data.get('score', 0), critique_text
    except Exception as e:
        # Sentinel message checked by the caller — keep the prefix stable.
        return 0, f"Evaluation error: {str(e)}"
|
|
|
|
def check_pacing(bp, summary, last_chapter_text, last_chapter_data, remaining_chapters, folder):
    """Assess pacing after a chapter and suggest structural adjustments.

    Args:
        bp: blueprint (supplies the genre).
        summary: running story summary; last 3000 chars are sent.
        last_chapter_text: prose of the chapter just written; last 2000 chars sent.
        last_chapter_data: plan dict for that chapter ('chapter_number', 'title').
        remaining_chapters: plan dicts still to write; next 3 titles are shown.
        folder: project folder for usage logging.

    Returns:
        dict with 'status' ("ok" | "add_bridge" | "cut_next"), 'reason', and
        optionally 'new_chapter'; or None when nothing remains or on failure.
    """
    utils.log("ARCHITECT", "Checking pacing and structure health...")

    # Nothing left to adjust — skip the model call entirely.
    if not remaining_chapters:
        return None

    meta = bp.get('book_metadata', {})
    genre = meta.get('genre', 'Fiction')

    prompt = f"""
Act as a Senior Structural Editor.
We just finished Chapter {last_chapter_data['chapter_number']}: "{last_chapter_data['title']}".

STORY SO FAR (Summary):
{summary[-3000:]}

JUST WRITTEN (Last 2000 chars):
{last_chapter_text[-2000:]}

UPCOMING CHAPTERS (Next 3):
{json.dumps([c['title'] for c in remaining_chapters[:3]])}

TOTAL REMAINING: {len(remaining_chapters)} chapters.

ANALYSIS TASK:
Determine if the story is moving too fast (Rushed) or too slow (Dragging) based on the {genre} genre.

DECISION RULES:
- If the last chapter skipped over major emotional reactions, travel, or necessary setup -> ADD_BRIDGE.
- If the last chapter already covered the events of the NEXT chapter -> CUT_NEXT.
- If the pacing is fine -> OK.

RETURN JSON:
{{
"status": "ok" or "add_bridge" or "cut_next",
"reason": "Explanation...",
"new_chapter": {{ "title": "...", "beats": ["..."], "pov_character": "..." }} (Required if add_bridge)
}}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        # None signals "no adjustment" — same as an empty remaining list.
        utils.log("ARCHITECT", f"Pacing check failed: {e}")
        return None
|
|
|
|
def create_initial_persona(bp, folder):
    """Generate a fictional Author Persona matched to the book's genre/tone.

    Args:
        bp: blueprint (supplies title, genre, tone, target audience).
        folder: project folder for usage logging.

    Returns:
        dict with at least 'name' and 'bio' (plus 'age'/'gender' when the
        model provides them); a neutral fallback persona on failure.
    """
    utils.log("SYSTEM", "Generating initial Author Persona based on genre/tone...")
    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})

    prompt = f"""
Create a fictional 'Author Persona' best suited to write this book.

BOOK DETAILS:
Title: {meta.get('title')}
Genre: {meta.get('genre')}
Tone: {style.get('tone')}
Target Audience: {meta.get('target_audience')}

TASK:
Create a profile for the ideal writer of this book.
Return JSON: {{ "name": "Pen Name", "bio": "Description of writing style (voice, sentence structure, vocabulary)...", "age": "...", "gender": "..." }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        # Fallback persona keeps the writing pipeline functional.
        utils.log("SYSTEM", f"Persona generation failed: {e}")
        return {"name": "AI Author", "bio": "Standard, balanced writing style."}
|
|
|
|
def refine_persona(bp, text, folder):
    """Refine the Author Persona bio to match prose the book actually produced.

    Args:
        bp: blueprint; author_details (possibly empty) is read from
            book_metadata and mutated in place when a new bio arrives.
        text: recent chapter prose; first 3000 chars are analyzed.
        folder: project folder for usage logging.

    Returns:
        The (possibly updated) author_details dict; unchanged on failure.
    """
    utils.log("SYSTEM", "Refining Author Persona based on recent chapters...")
    ad = bp.get('book_metadata', {}).get('author_details', {})
    current_bio = ad.get('bio', 'Standard style.')

    prompt = f"""
Act as a Literary Stylist. Analyze this text sample from the book.

TEXT:
{text[:3000]}

CURRENT AUTHOR BIO:
{current_bio}

TASK:
Refine the Author Bio to better match the actual text produced.
Highlight specific stylistic quirks, sentence patterns, or vocabulary choices found in the text.
The goal is to ensure future chapters sound exactly like this one.

Return JSON: {{ "bio": "Updated bio..." }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        new_bio = json.loads(utils.clean_json(response.text)).get('bio')
        if new_bio:
            ad['bio'] = new_bio
            utils.log("SYSTEM", " -> Persona bio updated.")
        return ad
    except Exception as e:
        # Was a bare `except: pass`; keep best-effort semantics but log it.
        utils.log("SYSTEM", f"Persona refinement failed: {e}")
    return ad
|
|
|
|
def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None):
    """Draft a chapter, then iteratively evaluate and refine it.

    Flow: build the drafting prompt (persona, style, continuity context) ->
    generate a draft with the writer model -> loop up to 5 times scoring the
    draft with evaluate_chapter_quality and rewriting with the logic model.
    Returns immediately at score >= 9; otherwise returns the best-scoring
    text seen. A failed initial draft returns an error placeholder chapter.

    Args:
        chap: chapter plan dict (chapter_number, title, beats, optional
            pacing / estimated_words / pov_character).
        bp: blueprint (length settings, metadata, style, characters).
        folder: project folder for usage logging.
        prev_sum: running story-so-far summary.
        tracking: optional continuity dict with a 'characters' sub-dict.
        prev_content: optional previous chapter text for tone continuity.
    """
    pacing = chap.get('pacing', 'Standard')
    est_words = chap.get('estimated_words', 'Flexible')
    utils.log("WRITER", f"Drafting Ch {chap['chapter_number']} ({pacing} | ~{est_words} words): {chap['title']}")
    ls = bp['length_settings']
    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})
    genre = meta.get('genre', 'Fiction')

    pov_char = chap.get('pov_character', '')

    # --- Build the author-persona block (legacy 'author_bio' fallback). ---
    ad = meta.get('author_details', {})
    if not ad and 'author_bio' in meta:
        persona_info = meta['author_bio']
    else:
        persona_info = f"Name: {ad.get('name', meta.get('author', 'Unknown'))}\n"
        if ad.get('age'): persona_info += f"Age: {ad['age']}\n"
        if ad.get('gender'): persona_info += f"Gender: {ad['gender']}\n"
        if ad.get('race'): persona_info += f"Race: {ad['race']}\n"
        if ad.get('nationality'): persona_info += f"Nationality: {ad['nationality']}\n"
        if ad.get('language'): persona_info += f"Language: {ad['language']}\n"
        if ad.get('bio'): persona_info += f"Style/Bio: {ad['bio']}\n"

    # Collect style samples: inline sample text plus up to 3000 chars from
    # each sample file in the personas directory (best-effort reads).
    samples = []
    if ad.get('sample_text'):
        samples.append(f"--- SAMPLE PARAGRAPH ---\n{ad['sample_text']}")

    if ad.get('sample_files'):
        for fname in ad['sample_files']:
            fpath = os.path.join(config.PERSONAS_DIR, fname)
            if os.path.exists(fpath):
                try:
                    with open(fpath, 'r', encoding='utf-8', errors='ignore') as f:
                        content = f.read(3000)
                    samples.append(f"--- SAMPLE FROM {fname} ---\n{content}...")
                except: pass

    if samples:
        persona_info += "\nWRITING STYLE SAMPLES:\n" + "\n".join(samples)

    # --- Continuity block from the tracking dict (visuals, preferences). ---
    char_visuals = ""
    if tracking and 'characters' in tracking:
        char_visuals = "\nCHARACTER TRACKING (Visuals & Preferences):\n"
        for name, data in tracking['characters'].items():
            desc = ", ".join(data.get('descriptors', []))
            likes = ", ".join(data.get('likes_dislikes', []))
            worn = data.get('last_worn', 'Unknown')
            char_visuals += f"- {name}: {desc}\n * Likes/Dislikes: {likes}\n"

            major = data.get('major_events', [])
            if major: char_visuals += f" * Major Events: {'; '.join(major)}\n"

            # 'Unknown' means clothing was reset after a time jump — omit it.
            if worn and worn != 'Unknown':
                char_visuals += f" * Last Worn: {worn} (NOTE: Only relevant if scene is continuous from previous chapter)\n"

    # --- Style block: scalar style entries plus list-valued specials. ---
    style_block = "\n".join([f"- {k.replace('_', ' ').title()}: {v}" for k, v in style.items() if isinstance(v, (str, int, float))])
    if 'tropes' in style and isinstance(style['tropes'], list):
        style_block += f"\n- Tropes: {', '.join(style['tropes'])}"

    if 'formatting_rules' in style and isinstance(style['formatting_rules'], list):
        style_block += "\n- Formatting Rules:\n * " + "\n * ".join(style['formatting_rules'])

    # Tail of the previous chapter (max 3000 chars) for tone continuity.
    prev_context_block = ""
    if prev_content:
        trunc_content = prev_content[-3000:] if len(prev_content) > 3000 else prev_content
        prev_context_block = f"\nPREVIOUS CHAPTER TEXT (For Tone & Continuity):\n{trunc_content}\n"

    prompt = f"""
Write Chapter {chap['chapter_number']}: {chap['title']}
GENRE: {genre}

PACING GUIDE:
- Format: {ls.get('label', 'Story')}
- Chapter Pacing: {pacing}
- Target Word Count: ~{est_words} (Use this as a guide, but prioritize story flow. Allow flexibility.)
- POV Character: {pov_char if pov_char else 'Protagonist'}

STYLE & FORMATTING:
{style_block}

AUTHOR VOICE (CRITICAL):
{persona_info}

INSTRUCTION:
Write the scene.
- Start with the Chapter Header formatted as Markdown H1 (e.g. '# Chapter X: Title'). Follow the 'Formatting Rules' for the header style.

- DEEP POV: Immerse the reader in the POV character's immediate experience. Filter descriptions through their specific worldview and emotional state.
- SHOW, DON'T TELL: Focus on immediate action and internal reaction. Don't summarize feelings; show the physical manifestation of them.
- SENSORY DETAILS: Use specific, grounding sensory details (smell, touch, sound) rather than generic descriptions.
- AVOID CLICHÉS: Avoid common AI tropes (e.g., 'shiver down spine', 'palpable tension', 'unspoken agreement', 'testament to', 'tapestry of', 'azure', 'cerulean').
- MAINTAIN CONTINUITY: Pay close attention to the PREVIOUS CONTEXT. Characters must NOT know things that haven't happened yet or haven't been revealed to them.
- CHARACTER INTERACTIONS: If characters are meeting for the first time in the summary, treat them as strangers.
- SENTENCE VARIETY: Avoid repetitive sentence structures (e.g. starting multiple sentences with "He" or "She"). Vary sentence length to create rhythm.
- 'Very Fast': Rapid fire, pure action/dialogue, minimal description.
- 'Fast': Punchy, keep it moving.
- 'Standard': Balanced dialogue and description.
- 'Slow': Detailed, atmospheric, immersive.
- 'Very Slow': Deep introspection, heavy sensory detail, slow burn.

PREVIOUS CONTEXT (Story So Far): {prev_sum}
{prev_context_block}
CHARACTERS: {json.dumps(bp['characters'])}
{char_visuals}
SCENE BEATS: {json.dumps(chap['beats'])}

Output Markdown.
"""
    current_text = ""
    try:
        resp_draft = ai.model_writer.generate_content(prompt)
        utils.log_usage(folder, "writer-flash", resp_draft.usage_metadata)
        current_text = resp_draft.text
    except Exception as e:
        # No draft at all: emit a placeholder chapter so assembly continues.
        utils.log("WRITER", f"⚠️ Failed Ch {chap['chapter_number']}: {e}")
        return f"## Chapter {chap['chapter_number']} Failed\n\nError: {e}"

    # Refinement Loop
    max_attempts = 5
    SCORE_AUTO_ACCEPT = 9   # return immediately at or above this score
    SCORE_PASSING = 7       # acceptable floor when attempts run out

    best_score = 0
    best_text = current_text
    past_critiques = []

    for attempt in range(1, max_attempts + 1):
        utils.log("WRITER", f" -> Evaluating Ch {chap['chapter_number']} (Attempt {attempt}/{max_attempts})...")
        score, critique = evaluate_chapter_quality(current_text, chap['title'], meta.get('genre', 'Fiction'), ai.model_logic, folder)

        past_critiques.append(f"Attempt {attempt}: {critique}")

        # evaluate_chapter_quality signals failure via this sentinel prefix.
        if "Evaluation error" in critique:
            utils.log("WRITER", f" ⚠️ {critique}. Keeping current draft.")
            if best_score == 0: best_text = current_text
            break

        utils.log("WRITER", f" Score: {score}/10. Critique: {critique}")

        if score >= SCORE_AUTO_ACCEPT:
            utils.log("WRITER", " 🌟 Auto-Accept threshold met.")
            return current_text

        # Track the best draft so a bad rewrite can't lose earlier quality.
        if score > best_score:
            best_score = score
            best_text = current_text

        if attempt == max_attempts:
            if best_score >= SCORE_PASSING:
                utils.log("WRITER", f" ✅ Max attempts reached. Accepting best score ({best_score}).")
                return best_text
            else:
                utils.log("WRITER", f" ⚠️ Quality low ({best_score}/{SCORE_PASSING}) but max attempts reached. Proceeding.")
                return best_text

        utils.log("WRITER", f" -> Refining Ch {chap['chapter_number']} based on feedback...")

        guidelines = get_style_guidelines()
        fw_list = '", "'.join(guidelines['filter_words'])

        # Exclude current critique from history to avoid duplication in prompt
        history_str = "\n".join(past_critiques[:-1]) if len(past_critiques) > 1 else "None"

        refine_prompt = f"""
Act as a Senior Editor. Rewrite this chapter to fix the issues identified below and ELEVATE the writing quality.

CRITIQUE TO ADDRESS (MANDATORY):
{critique}

PREVIOUS CRITIQUES (Reference):
{history_str}

STYLIZED REWRITE INSTRUCTIONS:
1. REMOVE FILTER WORDS: Delete "{fw_list}". Describe the image, sensation, or sound directly.
2. VARY SENTENCE STRUCTURE: Do not start consecutive sentences with "He", "She", or "The". Use introductory clauses and varying lengths.
3. SUBTEXT: Ensure dialogue implies meaning rather than stating it outright. People rarely say exactly what they mean.
4. GENRE CONSISTENCY: Ensure the tone matches {meta.get('genre', 'Fiction')}.
5. SETTING INTERACTION: Ensure characters interact with their environment (props, weather, lighting) during dialogue.
6. DRAMATIZE, DON'T SUMMARIZE: Expand summarized moments into full scenes with dialogue and action. Ensure the scene feels "full" and immersive.
7. STRONG VERBS: Avoid "was/were" constructions. Use active, specific verbs to drive the prose.
8. EMOTIONAL RESONANCE: Ensure the POV character's internal state is clear and drives the narrative.

STORY SO FAR:
{prev_sum}
{prev_context_block}

CURRENT DRAFT:
{current_text}

Return the polished, final version of the chapter in Markdown.
"""
        try:
            # Use Logic model (Pro) for refinement to ensure higher quality prose
            resp_refine = ai.model_logic.generate_content(refine_prompt)
            utils.log_usage(folder, "logic-pro", resp_refine.usage_metadata)
            current_text = resp_refine.text
        except Exception as e:
            utils.log("WRITER", f"Refinement failed: {e}")
            return best_text

    # Reached only via the evaluation-error `break` above.
    return best_text
|
|
|
|
def harvest_metadata(bp, folder, full_manuscript):
    """Scan the manuscript for significant characters missing from the blueprint.

    Args:
        bp: Blueprint dict; ``bp['characters']`` is extended in place with finds.
        folder: Project folder path, used for token-usage logging.
        full_manuscript: List of chapter dicts, each with a 'content' key.

    Returns:
        The (possibly updated) blueprint dict. Failures are logged and the
        blueprint is returned unchanged — harvesting is best-effort.
    """
    utils.log("HARVESTER", "Scanning for new characters...")
    # Cap the joined text at 500k chars to stay within model context limits.
    full_text = "\n".join([c.get('content', '') for c in full_manuscript])[:500000]

    prompt = f"""
Analyze this manuscript text.
EXISTING CHARACTERS: {json.dumps(bp.get('characters', []))}

MANUSCRIPT TEXT:
{full_text}

TASK: Identify NEW significant characters that appear in the text but are missing from the list.
RETURN JSON: {{'new_characters': [{{'name':'...', 'role':'...', 'description':'...'}}]}}
"""
    # BUG FIX: the original prompt never included {full_text}, so the model was
    # asked to find characters in text it never saw. Now interpolated above.
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        new_chars = json.loads(utils.clean_json(response.text)).get('new_characters', [])
        if new_chars:
            # filter_characters() drops junk entries (defined elsewhere in this module).
            valid_chars = filter_characters(new_chars)
            if valid_chars:
                utils.log("HARVESTER", f"Found {len(valid_chars)} new chars.")
                # setdefault: tolerate a blueprint that lacks 'characters'.
                bp.setdefault('characters', []).extend(valid_chars)
    except Exception as e:
        # Best-effort: a failed harvest must never break the pipeline, but
        # log the reason instead of swallowing it silently.
        utils.log("HARVESTER", f"Harvest failed: {e}")
    return bp
|
|
|
|
def update_persona_sample(bp, folder):
    """Extract a prose sample from the manuscript and attach it to the author persona.

    Saves the first 3000 chars of the manuscript as a sample file, then either
    creates a persona entry (with an AI-generated one-line style bio) or appends
    the new sample file to the existing entry. Returns None in all cases.

    Args:
        bp: Blueprint dict; 'book_metadata' supplies title and author name.
        folder: Project folder containing manuscript.json; also used for usage logging.
    """
    utils.log("SYSTEM", "Extracting author persona from manuscript...")

    ms_path = os.path.join(folder, "manuscript.json")
    if not os.path.exists(ms_path): return
    ms = utils.load_json(ms_path)
    if not ms: return

    # 1. Extract Text Sample — skip trivially short manuscripts.
    full_text = "\n".join([c.get('content', '') for c in ms])
    if len(full_text) < 500: return

    # 2. Save Sample File
    # exist_ok=True avoids the check-then-create race of exists()+makedirs().
    os.makedirs(config.PERSONAS_DIR, exist_ok=True)

    meta = bp.get('book_metadata', {})
    safe_title = utils.sanitize_filename(meta.get('title', 'book'))[:20]
    timestamp = int(time.time())
    filename = f"sample_{safe_title}_{timestamp}.txt"
    filepath = os.path.join(config.PERSONAS_DIR, filename)

    sample_text = full_text[:3000]
    with open(filepath, 'w', encoding='utf-8') as f: f.write(sample_text)

    # 3. Update or Create Persona
    author_name = meta.get('author', 'Unknown Author')

    personas = {}
    if os.path.exists(config.PERSONAS_FILE):
        try:
            # utf-8 explicitly: author names may be non-ASCII and the default
            # platform encoding is not reliable (e.g. cp1252 on Windows).
            with open(config.PERSONAS_FILE, 'r', encoding='utf-8') as f: personas = json.load(f)
        except (OSError, json.JSONDecodeError):
            pass  # corrupt or unreadable registry -> rebuild from scratch

    if author_name not in personas:
        utils.log("SYSTEM", f"Generating new persona profile for '{author_name}'...")
        prompt = f"Analyze this writing style (Tone, Voice, Vocabulary). Write a 1-sentence author bio describing it.\nTEXT: {sample_text[:1000]}"
        try:
            response = ai.model_logic.generate_content(prompt)
            utils.log_usage(folder, "logic-pro", response.usage_metadata)
            bio = response.text.strip()
        except Exception:
            # The bio is cosmetic; fall back to a placeholder rather than fail.
            bio = "Style analysis unavailable."

        personas[author_name] = {
            "name": author_name,
            "bio": bio,
            "sample_files": [filename],
            "sample_text": sample_text[:500]
        }
    else:
        utils.log("SYSTEM", f"Updating persona '{author_name}' with new sample.")
        if 'sample_files' not in personas[author_name]: personas[author_name]['sample_files'] = []
        if filename not in personas[author_name]['sample_files']:
            personas[author_name]['sample_files'].append(filename)

    with open(config.PERSONAS_FILE, 'w', encoding='utf-8') as f: json.dump(personas, f, indent=2)
|
|
|
|
def refine_bible(bible, instruction, folder):
    """Apply a single user instruction to the story-bible dict via the logic model.

    Returns the updated dict parsed from the model's JSON reply, or None when
    the request or the JSON parsing fails (the failure is logged).
    """
    utils.log("SYSTEM", f"Refining Bible with instruction: {instruction}")
    prompt = f"""
Act as a Senior Developmental Editor.
CURRENT JSON: {json.dumps(bible)}
USER INSTRUCTION: {instruction}

TASK: Update the JSON based on the instruction. Maintain valid JSON structure.
Ensure character motivations remain consistent and plot holes are avoided.

RETURN ONLY THE JSON.
"""
    try:
        reply = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", reply.usage_metadata)
        return json.loads(utils.clean_json(reply.text))
    except Exception as err:
        utils.log("SYSTEM", f"Refinement failed: {err}")
        return None
|
|
|
|
def analyze_consistency(bp, manuscript, folder):
    """Check the manuscript against the blueprint for continuity errors.

    Returns a dict shaped like {"issues": [...], "score": int, "summary": str};
    the early-exit guard results omit "summary", matching the original contract.
    """
    utils.log("EDITOR", "Analyzing manuscript for continuity errors...")

    if not manuscript: return {"issues": ["No manuscript found."], "score": 0}
    if not bp: return {"issues": ["No blueprint found."], "score": 0}

    # Summarize chapters to save tokens: head + tail of each chapter captures
    # a scene's setup and resolution cheaply.
    def _excerpt(body):
        if len(body) > 2000:
            return body[:1000] + "\n...\n" + body[-1000:]
        return body

    context = "\n".join(
        f"Ch {ch.get('num')}: {_excerpt(ch.get('content', ''))}" for ch in manuscript
    )

    prompt = f"""
Act as a Continuity Editor. Analyze this book summary for plot holes and inconsistencies.

CHARACTERS: {json.dumps(bp.get('characters', []))}

CHAPTER SUMMARIES:
{context}

TASK:
Identify 3-5 major continuity errors or plot holes (e.g. dead characters appearing, teleporting, forgotten injuries, motivation flips).
If none, say "No major issues found."

Return JSON: {{ "issues": ["Issue 1", "Issue 2"], "score": 8, "summary": "Brief overall assessment." }} (Score 1-10 on logical consistency)
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, "logic-pro", response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        return {"issues": [f"Analysis failed: {e}"], "score": 0, "summary": "Error during analysis."}
|
|
|
|
def rewrite_chapter_content(bp, manuscript, chapter_num, instruction, folder):
    """Fully rewrite one chapter according to a user instruction.

    Pulls the tail of the preceding chapter for continuity, then asks the
    writer model for a brand-new Markdown draft. Returns the new chapter text,
    or None when the chapter is missing or generation fails.
    """
    utils.log("WRITER", f"Rewriting Ch {chapter_num} with instruction: {instruction}")

    # Locate the chapter being rewritten.
    target_chap = next((c for c in manuscript if c['num'] == chapter_num), None)
    if not target_chap: return None

    # Work out which chapter precedes this one so the rewrite flows from it.
    prev_chap = None
    if isinstance(chapter_num, int):
        prev_chap = next((c for c in manuscript if c['num'] == chapter_num - 1), None)
    elif str(chapter_num).lower() == "epilogue":
        # The epilogue follows the highest numbered chapter.
        numbered_chaps = [c for c in manuscript if isinstance(c['num'], int)]
        if numbered_chaps:
            prev_chap = max(numbered_chaps, key=lambda x: x['num'])

    # Last 3000 chars of the previous chapter give the model its lead-in.
    prev_text = prev_chap.get('content', '')[-3000:] if prev_chap else ""

    meta = bp.get('book_metadata', {})

    prompt = f"""
Act as a Ghostwriter. Rewrite Chapter {chapter_num}: {target_chap.get('title', '')}

USER INSTRUCTION (PRIMARY DIRECTIVE):
{instruction}

STORY CONTEXT:
- Title: {meta.get('title')}
- Genre: {meta.get('genre')}
- Tone: {meta.get('style', {}).get('tone')}

PREVIOUS CHAPTER ENDING (Continuity):
{prev_text}

CURRENT DRAFT (Reference only - feel free to change significantly based on instruction):
{target_chap.get('content', '')[:5000]}

CHARACTERS:
{json.dumps(bp.get('characters', []))}

TASK:
Write the full chapter content in Markdown.
- Ensure it flows naturally from the previous chapter ending.
- Follow the User Instruction strictly, even if it contradicts the current draft.
- Maintain the established character voices.
"""

    try:
        response = ai.model_writer.generate_content(prompt)
        utils.log_usage(folder, "writer-flash", response.usage_metadata)
        return response.text
    except Exception as e:
        utils.log("WRITER", f"Rewrite failed: {e}")
        return None
|
|
|
|
def check_and_propagate(bp, manuscript, changed_chap_num, folder):
    """Propagate a chapter rewrite through later chapters to preserve continuity.

    Walks chapters after ``changed_chap_num`` in story order, asking the writer
    model per chapter whether it must be rewritten to fit the new context. Once
    two consecutive chapters need no change, a single long-range scan flags any
    distant chapters that reference the changed material, and only flagged
    chapters are checked from then on.

    Args:
        bp: Blueprint dict (unused here but kept for interface parity with callers).
        manuscript: List of chapter dicts; chapter 'content' is mutated in place.
        changed_chap_num: Number (or label like "Epilogue") of the rewritten chapter.
        folder: Project folder (manuscript.json checkpointing and usage logging).

    Returns:
        The manuscript list if any chapter was rewritten, else None.
    """
    utils.log("WRITER", f"Checking ripple effects from Ch {changed_chap_num}...")

    # Find the changed chapter
    changed_chap = next((c for c in manuscript if c['num'] == changed_chap_num), None)
    if not changed_chap: return None

    # Summarize the change to save tokens in every downstream prompt.
    change_summary_prompt = f"Summarize the key events and ending state of this chapter:\n{changed_chap.get('content', '')[:10000]}"
    try:
        resp = ai.model_writer.generate_content(change_summary_prompt)
        # FIX: this call previously went unlogged, unlike every other model
        # call in this module — token usage was silently untracked.
        utils.log_usage(folder, "writer-flash", resp.usage_metadata)
        current_context = resp.text
    except Exception:
        current_context = changed_chap.get('content', '')[-2000:] # Fallback

    # Keep the original summary for the long-range scan; current_context is
    # rolled forward chapter by chapter below.
    original_change_context = current_context

    # Iterate subsequent chapters in story order (handles Prologue/Epilogue).
    sorted_ms = sorted(manuscript, key=utils.chapter_sort_key)
    start_index = -1
    for i, c in enumerate(sorted_ms):
        # str() comparison: chapter numbers may mix ints and labels.
        if str(c['num']) == str(changed_chap_num):
            start_index = i
            break

    # Nothing to propagate if the chapter wasn't found or is the last one.
    if start_index == -1 or start_index == len(sorted_ms) - 1:
        return None

    changes_made = False
    consecutive_no_changes = 0
    potential_impact_chapters = []  # chapter numbers flagged by the long-range scan

    for i in range(start_index + 1, len(sorted_ms)):
        target_chap = sorted_ms[i]

        # Optimization: if 2 chapters in a row didn't need changes, assume the
        # local ripple has stopped and run one long-range scan to decide
        # whether any distant chapters still need checking.
        if consecutive_no_changes >= 2:
            if target_chap['num'] not in potential_impact_chapters:
                # Flags for chapters still ahead of this one mean a scan
                # already ran and there is pending work — don't rescan.
                future_flags = [n for n in potential_impact_chapters if isinstance(n, int) and isinstance(target_chap['num'], int) and n > target_chap['num']]

                if not future_flags:
                    # No pending flags. Scan remaining chapters.
                    remaining_chaps = sorted_ms[i:]
                    if not remaining_chaps: break

                    utils.log("WRITER", " -> Short-term ripple dissipated. Scanning remaining chapters for long-range impacts...")

                    chapter_summaries = []
                    for rc in remaining_chaps:
                        text = rc.get('content', '')
                        excerpt = text[:500] + "\n...\n" + text[-500:] if len(text) > 1000 else text
                        chapter_summaries.append(f"Ch {rc['num']}: {excerpt}")

                    scan_prompt = f"""
We are propagating a change from Chapter {changed_chap_num}.
The immediate ripple effect seems to have stopped.

ORIGINAL CHANGE CONTEXT:
{original_change_context}

REMAINING CHAPTERS:
{json.dumps(chapter_summaries)}

TASK:
Identify any later chapters that mention items, characters, or locations involved in the Change Context.
Return a JSON list of Chapter Numbers (integers) that might need updating.
Example: [5, 12]
If none, return [].
"""

                    try:
                        resp = ai.model_logic.generate_content(scan_prompt)
                        # FIX: log scan usage for consistency with the rest of the module.
                        utils.log_usage(folder, "logic-pro", resp.usage_metadata)
                        potential_impact_chapters = json.loads(utils.clean_json(resp.text))
                        if not isinstance(potential_impact_chapters, list): potential_impact_chapters = []
                        # Ensure integers; drop anything non-numeric the model returned.
                        potential_impact_chapters = [int(x) for x in potential_impact_chapters if str(x).isdigit()]
                    except Exception as e:
                        utils.log("WRITER", f" -> Scan failed: {e}. Stopping.")
                        break

                    if not potential_impact_chapters:
                        utils.log("WRITER", " -> No long-range impacts detected. Stopping.")
                        break
                    else:
                        utils.log("WRITER", f" -> Detected potential impact in chapters: {potential_impact_chapters}")

            # If current chapter is still not in the list, skip it.
            # Safety: always check non-integer chapters (Prologue/Epilogue).
            if isinstance(target_chap['num'], int) and target_chap['num'] not in potential_impact_chapters:
                utils.log("WRITER", f" -> Skipping Ch {target_chap['num']} (Not flagged).")
                continue

        utils.log("WRITER", f" -> Checking Ch {target_chap['num']} for continuity...")

        prompt = f"""
Chapter {changed_chap_num} was just rewritten.
NEW CONTEXT/ENDING of previous section:
{current_context}

CURRENT TEXT of Ch {target_chap['num']}:
{target_chap['content'][:5000]}... (truncated)

TASK:
Does Ch {target_chap['num']} need to be rewritten to maintain continuity with the new context?
- If YES (e.g. references old events that changed, character states don't match): Rewrite the chapter fully in Markdown.
- If NO (it fits fine): Return ONLY the string "NO_CHANGE".
"""

        try:
            response = ai.model_writer.generate_content(prompt)
            # FIX: continuity checks were also unlogged; track them too.
            utils.log_usage(folder, "writer-flash", response.usage_metadata)
            text = response.text.strip()

            # Treat as "no change" only if the sentinel leads a short reply —
            # a full rewrite could legitimately contain the substring.
            if "NO_CHANGE" in text[:20] and len(text) < 100:
                utils.log("WRITER", f" -> Ch {target_chap['num']} is consistent.")
                # Roll the context forward using the existing chapter text.
                current_context = f"Ch {target_chap['num']} Summary: " + target_chap.get('content', '')[-2000:]
                consecutive_no_changes += 1
            else:
                utils.log("WRITER", f" -> Rewriting Ch {target_chap['num']} to fix continuity.")
                target_chap['content'] = text
                changes_made = True
                # Roll the context forward using the NEW text.
                current_context = f"Ch {target_chap['num']} Summary: " + text[-2000:]
                consecutive_no_changes = 0

                # Save immediately to prevent data loss if subsequent checks fail.
                try:
                    with open(os.path.join(folder, "manuscript.json"), 'w') as f: json.dump(manuscript, f, indent=2)
                except Exception:
                    pass  # checkpointing is best-effort; in-memory state is authoritative

        except Exception as e:
            utils.log("WRITER", f" -> Check failed: {e}")

    return manuscript if changes_made else None