v2.0.0: Modularize project into single-responsibility packages
Replaced monolithic modules/ package with a clean architecture:
- core/ config.py, utils.py
- ai/ models.py (ResilientModel), setup.py (init_models)
- story/ planner.py, writer.py, editor.py, style_persona.py, bible_tracker.py
- marketing/ cover.py, blurb.py, fonts.py, assets.py
- export/ exporter.py
- web/ app.py (Flask factory), db.py, helpers.py, tasks.py, routes/{auth,project,run,persona,admin}.py
- cli/ engine.py (run_generation), wizard.py (BookWizard)
Flask routes split into 5 Blueprints; all templates updated with blueprint-
prefixed url_for() calls. Dockerfile and docker-compose updated to use
web.app entry point and new package paths.
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
0
story/__init__.py
Normal file
0
story/__init__.py
Normal file
144
story/bible_tracker.py
Normal file
144
story/bible_tracker.py
Normal file
@@ -0,0 +1,144 @@
|
||||
import json
|
||||
from core import utils
|
||||
from ai import models as ai_models
|
||||
|
||||
|
||||
def merge_selected_changes(original, draft, selected_keys):
    """Apply a user-selected subset of draft changes onto the original bible.

    Dotted keys address merge targets:
      - "meta.<field>"           -> project_metadata field ("tone" lives under style)
      - "char.<idx>"             -> character at index (replace, or append if new)
      - "book.<num>"             -> book title + manual_instruction
      - "book.<num>.beat.<idx>"  -> a single plot beat (list padded with "" if short)

    Unknown keys are ignored. Mutates and returns `original`.
    """
    def sort_key(k):
        # Numeric path components compare as ints so "book.10" sorts after "book.2".
        return [int(p) if p.isdigit() else p for p in k.split('.')]

    # Sort a copy so the caller's list is not reordered as a side effect.
    for key in sorted(selected_keys, key=sort_key):
        parts = key.split('.')

        if parts[0] == 'meta' and len(parts) == 2:
            field = parts[1]
            if field == 'tone':
                # Tone is nested one level deeper than the other metadata fields.
                original['project_metadata']['style']['tone'] = draft['project_metadata']['style']['tone']
            elif field in original['project_metadata']:
                original['project_metadata'][field] = draft['project_metadata'][field]

        elif parts[0] == 'char' and len(parts) >= 2:
            idx = int(parts[1])
            if idx < len(draft['characters']):
                if idx < len(original['characters']):
                    original['characters'][idx] = draft['characters'][idx]
                else:
                    original['characters'].append(draft['characters'][idx])

        elif parts[0] == 'book' and len(parts) >= 2:
            book_num = int(parts[1])
            orig_book = next((b for b in original['books'] if b['book_number'] == book_num), None)
            draft_book = next((b for b in draft['books'] if b['book_number'] == book_num), None)

            # Nothing to merge if the draft has no such book; without this guard
            # the draft_book[...] accesses below raise TypeError on None.
            if not draft_book:
                continue

            if not orig_book:
                # Brand-new book: take the draft's version wholesale.
                original['books'].append(draft_book)
                original['books'].sort(key=lambda x: x.get('book_number', 999))
                continue

            if len(parts) == 2:
                orig_book['title'] = draft_book['title']
                orig_book['manual_instruction'] = draft_book['manual_instruction']

            elif len(parts) == 4 and parts[2] == 'beat':
                beat_idx = int(parts[3])
                if beat_idx < len(draft_book['plot_beats']):
                    # Pad with empty beats so the target index exists.
                    while len(orig_book['plot_beats']) <= beat_idx:
                        orig_book['plot_beats'].append("")
                    orig_book['plot_beats'][beat_idx] = draft_book['plot_beats'][beat_idx]
    return original
|
||||
|
||||
|
||||
def filter_characters(chars):
    """Drop placeholder characters the model sometimes emits.

    A character survives only if it has a non-empty 'name' whose
    lowercased, stripped form is not a known placeholder label.
    """
    # Set literal: O(1) membership per character instead of scanning a list.
    blacklist = {'name', 'character name', 'role', 'protagonist', 'antagonist', 'love interest', 'unknown', 'tbd', 'todo', 'hero', 'villain', 'main character', 'side character'}
    return [c for c in chars if c.get('name') and c.get('name').lower().strip() not in blacklist]
|
||||
|
||||
|
||||
def update_tracking(folder, chapter_num, chapter_text, current_tracking):
    """Fold a newly written chapter into the continuity-tracking state.

    Sends the current tracking dict plus the new chapter text (capped at
    20k chars) to the logic model and returns the model's updated tracking
    object.

    Args:
        folder: Project folder, used for usage logging.
        chapter_num: Chapter identifier (log output only).
        chapter_text: Raw chapter text.
        current_tracking: Tracking dict (events, characters, content warnings).

    Returns:
        The model's updated tracking dict, or `current_tracking` unchanged
        if the API call or JSON parsing fails (best-effort behavior).
    """
    utils.log("TRACKER", f"Updating world state & character visuals for Ch {chapter_num}...")

    prompt = f"""
ROLE: Continuity Tracker
TASK: Update the Story Bible based on the new chapter.

INPUT_TRACKING:
{json.dumps(current_tracking)}

NEW_TEXT:
{chapter_text[:20000]}

OPERATIONS:
1. EVENTS: Append 1-3 key plot points to 'events'.
2. CHARACTERS: Update 'descriptors', 'likes_dislikes', 'speech_style', 'last_worn', 'major_events'.
- "descriptors": List of strings. Add PERMANENT physical traits (height, hair, eyes), specific items (jewelry, weapons). Avoid duplicates.
- "likes_dislikes": List of strings. Add specific preferences, likes, or dislikes mentioned (e.g., "Hates coffee", "Loves jazz").
- "speech_style": String. Describe how they speak (e.g. "Formal, no contractions", "Uses slang", "Stutters", "Short sentences").
- "last_worn": String. Update if specific clothing is described. IMPORTANT: If a significant time jump occurred (e.g. next day) and no new clothing is described, reset this to "Unknown".
- "major_events": List of strings. Log significant life-altering events occurring in THIS chapter (e.g. "Lost an arm", "Married", "Betrayed by X").
3. WARNINGS: Append new 'content_warnings'.

OUTPUT_FORMAT (JSON): Return the updated tracking object structure.
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        # clean_json presumably strips markdown fences around the model's JSON — verify in core.utils.
        new_data = json.loads(utils.clean_json(response.text))
        return new_data
    except Exception as e:
        # On any failure keep the previous tracking state rather than losing it.
        utils.log("TRACKER", f"Failed to update tracking: {e}")
        return current_tracking
|
||||
|
||||
|
||||
def harvest_metadata(bp, folder, full_manuscript):
    """Scan the full manuscript for significant characters not yet in the bible.

    Joins all chapter contents (capped at 500k chars), asks the logic model
    for characters missing from `bp['characters']`, filters out placeholder
    names, and appends the survivors in place.

    Args:
        bp: Blueprint dict with a 'characters' list.
        folder: Project folder for usage logging.
        full_manuscript: List of chapter dicts with 'content' keys.

    Returns:
        `bp`, possibly extended with new characters (best-effort: returned
        unchanged on any failure).
    """
    utils.log("HARVESTER", "Scanning for new characters...")
    full_text = "\n".join([c.get('content', '') for c in full_manuscript])[:500000]

    prompt = f"""
ROLE: Data Extractor
TASK: Identify NEW significant characters.

INPUT_TEXT:
{full_text}

KNOWN_CHARACTERS: {json.dumps(bp['characters'])}

OUTPUT_FORMAT (JSON): {{ "new_characters": [{{ "name": "String", "role": "String", "description": "String" }}] }}
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        new_chars = json.loads(utils.clean_json(response.text)).get('new_characters', [])
        if new_chars:
            # Drop placeholder names ("Protagonist", "TBD", ...) before merging.
            valid_chars = filter_characters(new_chars)
            if valid_chars:
                utils.log("HARVESTER", f"Found {len(valid_chars)} new chars.")
                bp['characters'].extend(valid_chars)
    except Exception as e:
        # Harvesting is optional enrichment: log instead of a bare `except: pass`
        # so real failures are visible and KeyboardInterrupt is not swallowed.
        utils.log("HARVESTER", f"Harvest failed: {e}")
    return bp
|
||||
|
||||
|
||||
def refine_bible(bible, instruction, folder):
    """Apply a free-form user instruction to the whole bible via the logic model.

    Args:
        bible: Full bible dict; serialized verbatim into the prompt.
        instruction: User's natural-language edit request.
        folder: Project folder for usage logging.

    Returns:
        The model's full updated bible dict, or None if the call or JSON
        parsing fails (callers must handle None).
    """
    utils.log("SYSTEM", f"Refining Bible with instruction: {instruction}")
    prompt = f"""
ROLE: Senior Developmental Editor
TASK: Update the Bible JSON based on instruction.

INPUT_DATA:
- CURRENT_JSON: {json.dumps(bible)}
- INSTRUCTION: {instruction}

CONSTRAINTS:
- Maintain valid JSON structure.
- Ensure consistency.

OUTPUT_FORMAT (JSON): The full updated Bible JSON object.
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        new_data = json.loads(utils.clean_json(response.text))
        return new_data
    except Exception as e:
        utils.log("SYSTEM", f"Refinement failed: {e}")
        return None
|
||||
399
story/editor.py
Normal file
399
story/editor.py
Normal file
@@ -0,0 +1,399 @@
|
||||
import json
|
||||
import os
|
||||
from core import utils
|
||||
from ai import models as ai_models
|
||||
from story.style_persona import get_style_guidelines
|
||||
|
||||
|
||||
def evaluate_chapter_quality(text, chapter_title, genre, model, folder):
    """Score a chapter draft against the house rubric using the given model.

    Args:
        text: Chapter draft; capped at 30k chars before sending.
        chapter_title: Title shown to the model as metadata.
        genre: Genre string woven into the rubric.
        model: Model wrapper exposing `generate_content`; its `name`
            attribute (when present) is used for usage logging.
        folder: Project folder for usage logging.

    Returns:
        (score, critique) tuple: the model's integer score (0 on error) and
        the critique text with any "REQUIRED FIXES" list appended.
    """
    guidelines = get_style_guidelines()
    ai_isms = "', '".join(guidelines['ai_isms'])
    # Only the first five filter words are shown, as short "He <word>" examples.
    fw_examples = ", ".join([f"'He {w}'" for w in guidelines['filter_words'][:5]])

    # Scale requested fix count with length: at least 3, ~1 per 500 words,
    # presented as a small range (min to min+2).
    word_count = len(text.split()) if text else 0
    min_sugg = max(3, int(word_count / 500))
    max_sugg = min_sugg + 2
    suggestion_range = f"{min_sugg}-{max_sugg}"

    prompt = f"""
ROLE: Senior Literary Editor
TASK: Critique chapter draft.

METADATA:
- TITLE: {chapter_title}
- GENRE: {genre}

PROHIBITED_PATTERNS:
- AI_ISMS: {ai_isms}
- FILTER_WORDS: {fw_examples}
- CLICHES: White Room, As You Know Bob, Summary Mode, Anachronisms.
- SYNTAX: Repetitive structure, Passive Voice, Adverb Reliance.

QUALITY_RUBRIC (1-10):
1. ENGAGEMENT & TENSION: Does the story grip the reader from the first line? Is there conflict or tension in every scene?
2. SCENE EXECUTION: Is the middle of the chapter fully fleshed out? Does it avoid "sagging" or summarizing key moments?
3. VOICE & TONE: Is the narrative voice distinct? Does it match the genre?
4. SENSORY IMMERSION: Does the text use sensory details effectively without being overwhelming?
5. SHOW, DON'T TELL: Are emotions shown through physical reactions and subtext?
6. CHARACTER AGENCY: Do characters drive the plot through active choices?
7. PACING: Does the chapter feel rushed? Does the ending land with impact, or does it cut off too abruptly?
8. GENRE APPROPRIATENESS: Are introductions of characters, places, items, or actions consistent with the {genre} conventions?
9. DIALOGUE AUTHENTICITY: Do characters sound distinct? Is there subtext? Avoids "on-the-nose" dialogue.
10. PLOT RELEVANCE: Does the chapter advance the plot or character arcs significantly? Avoids filler.
11. STAGING & FLOW: Do characters enter/exit physically? Do paragraphs transition logically (Action -> Reaction)?
12. PROSE DYNAMICS: Is there sentence variety? Avoids purple prose, adjective stacking, and excessive modification.
13. CLARITY & READABILITY: Is the text easy to follow? Are sentences clear and concise?

SCORING_SCALE:
- 10 (Masterpiece): Flawless, impactful, ready for print.
- 9 (Bestseller): Exceptional quality, minor style tweaks only.
- 7-8 (Professional): Good draft, solid structure, needs editing.
- 6 (Passable): Average, has issues with pacing or voice. Needs heavy refinement.
- 1-5 (Fail): Structural flaws, boring, or incoherent. Needs rewrite.

OUTPUT_FORMAT (JSON):
{{
"score": int,
"critique": "Detailed analysis of flaws, citing specific examples from the text.",
"actionable_feedback": "List of {suggestion_range} specific, ruthless instructions for the rewrite (e.g. 'Expand the middle dialogue', 'Add sensory details about the rain', 'Dramatize the argument instead of summarizing it')."
}}
"""
    try:
        # Chapter text goes in as a second content part, capped at 30k chars.
        response = model.generate_content([prompt, text[:30000]])
        # Fall back to the configured logic-model name when the wrapper has no `name`.
        model_name = getattr(model, 'name', ai_models.logic_model_name)
        utils.log_usage(folder, model_name, response.usage_metadata)
        data = json.loads(utils.clean_json(response.text))

        critique_text = data.get('critique', 'No critique provided.')
        if data.get('actionable_feedback'):
            critique_text += "\n\nREQUIRED FIXES:\n" + str(data.get('actionable_feedback'))

        return data.get('score', 0), critique_text
    except Exception as e:
        # Any failure (API, JSON) degrades to a zero score carrying the error text.
        return 0, f"Evaluation error: {str(e)}"
|
||||
|
||||
|
||||
def check_pacing(bp, summary, last_chapter_text, last_chapter_data, remaining_chapters, folder):
    """Ask the logic model whether the remaining outline needs adjustment.

    Compares the story-so-far summary and the tail of the latest chapter
    against the next few planned chapter titles, and asks the model for one
    of three verdicts: ok, add_bridge (insert a bridging chapter), or
    cut_next (the next chapter's beats were already covered).

    Args:
        bp: Blueprint dict (kept in the signature for callers; not read here).
        summary: Running story summary; last 3000 chars are sent.
        last_chapter_text: Latest chapter text; last 2000 chars are sent.
        last_chapter_data: Unused; kept for signature compatibility.
        remaining_chapters: Upcoming chapter dicts with 'title' keys.
        folder: Project folder for usage logging.

    Returns:
        Parsed JSON verdict dict ({"status", "reason", optional
        "new_chapter"}), or None when nothing remains or the call fails.
    """
    utils.log("ARCHITECT", "Checking pacing and structure health...")

    if not remaining_chapters:
        return None

    # (Removed an unused `meta = bp.get('book_metadata', {})` local.)
    prompt = f"""
ROLE: Structural Editor
TASK: Analyze pacing.

CONTEXT:
- PREVIOUS_SUMMARY: {summary[-3000:]}
- CURRENT_CHAPTER: {last_chapter_text[-2000:]}
- UPCOMING: {json.dumps([c['title'] for c in remaining_chapters[:3]])}
- REMAINING_COUNT: {len(remaining_chapters)}

LOGIC:
- IF skipped major beats -> ADD_BRIDGE
- IF covered next chapter's beats -> CUT_NEXT
- ELSE -> OK

OUTPUT_FORMAT (JSON):
{{
"status": "ok" or "add_bridge" or "cut_next",
"reason": "Explanation...",
"new_chapter": {{ "title": "...", "beats": ["..."], "pov_character": "..." }} (Required if add_bridge)
}}
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        utils.log("ARCHITECT", f"Pacing check failed: {e}")
        return None
|
||||
|
||||
|
||||
def analyze_consistency(bp, manuscript, folder):
    """Run a whole-book continuity audit over chapter excerpts.

    Builds head/tail excerpts of every chapter (first+last 1000 chars for
    long chapters), sends them with the character roster, and returns the
    model's issue list.

    Args:
        bp: Blueprint dict; 'characters' is included in the prompt.
        manuscript: List of chapter dicts with 'num' and 'content'.
        folder: Project folder for usage logging.

    Returns:
        Dict with 'issues' (list of strings), 'score' (int), and 'summary'.
        Degenerate inputs and failures return an error-shaped dict of the
        same form rather than raising.
    """
    utils.log("EDITOR", "Analyzing manuscript for continuity errors...")

    if not manuscript: return {"issues": ["No manuscript found."], "score": 0}
    if not bp: return {"issues": ["No blueprint found."], "score": 0}

    chapter_summaries = []
    for ch in manuscript:
        text = ch.get('content', '')
        # Long chapters are abbreviated to their first and last 1000 chars.
        excerpt = text[:1000] + "\n...\n" + text[-1000:] if len(text) > 2000 else text
        chapter_summaries.append(f"Ch {ch.get('num')}: {excerpt}")

    context = "\n".join(chapter_summaries)

    prompt = f"""
ROLE: Continuity Editor
TASK: Analyze book summary for plot holes.

INPUT_DATA:
- CHARACTERS: {json.dumps(bp.get('characters', []))}
- SUMMARIES:
{context}

OUTPUT_FORMAT (JSON): {{ "issues": ["Issue 1", "Issue 2"], "score": 8, "summary": "Brief overall assessment." }}
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        return {"issues": [f"Analysis failed: {e}"], "score": 0, "summary": "Error during analysis."}
|
||||
|
||||
|
||||
def rewrite_chapter_content(bp, manuscript, chapter_num, instruction, folder):
    """Rewrite one chapter according to a user directive, preserving continuity.

    Gathers context (previous chapter tail, author persona, character
    tracking file, style filter-word list) and asks the logic model for a
    full rewrite plus a fresh continuity summary.

    Args:
        bp: Blueprint dict ('book_metadata', 'characters').
        manuscript: List of chapter dicts with 'num' and 'content'.
        chapter_num: int chapter number, or a string like "epilogue".
        instruction: User's rewrite directive (overrides the current draft).
        folder: Project folder (usage logging + tracking_characters.json).

    Returns:
        (content, summary) on success; (raw_text, None) if the model's JSON
        could not be parsed; (None, None) if the API call fails; bare None
        if the chapter is not found.
    """
    utils.log("WRITER", f"Rewriting Ch {chapter_num} with instruction: {instruction}")

    target_chap = next((c for c in manuscript if str(c.get('num')) == str(chapter_num)), None)
    # NOTE(review): this early exit returns a bare None while all other paths
    # return a 2-tuple — callers that unpack must handle this; kept for
    # backward compatibility with existing `is None` checks.
    if not target_chap: return None

    # Locate the preceding chapter for continuity context.
    prev_text = ""
    prev_chap = None
    if isinstance(chapter_num, int):
        prev_chap = next((c for c in manuscript if c['num'] == chapter_num - 1), None)
    elif str(chapter_num).lower() == "epilogue":
        # The epilogue follows the highest-numbered regular chapter.
        numbered_chaps = [c for c in manuscript if isinstance(c['num'], int)]
        if numbered_chaps:
            prev_chap = max(numbered_chaps, key=lambda x: x['num'])

    if prev_chap:
        prev_text = prev_chap.get('content', '')[-3000:]

    meta = bp.get('book_metadata', {})

    # Author persona: prefer structured author_details; fall back to the
    # legacy flat 'author_bio' field.
    ad = meta.get('author_details', {})
    if not ad and 'author_bio' in meta:
        persona_info = meta['author_bio']
    else:
        persona_info = f"Name: {ad.get('name', meta.get('author', 'Unknown'))}\n"
        if ad.get('bio'): persona_info += f"Style/Bio: {ad['bio']}\n"

    # Optional character tracking (visual descriptors, speech styles) from disk.
    # (Removed an unused `from core import config` import here.)
    char_visuals = ""
    tracking_path = os.path.join(folder, "tracking_characters.json")
    if os.path.exists(tracking_path):
        try:
            tracking_chars = utils.load_json(tracking_path)
            if tracking_chars:
                char_visuals = "\nCHARACTER TRACKING (Visuals & Preferences):\n"
                for name, data in tracking_chars.items():
                    desc = ", ".join(data.get('descriptors', []))
                    speech = data.get('speech_style', 'Unknown')
                    char_visuals += f"- {name}: {desc}\n * Speech: {speech}\n"
        except Exception as e:
            # Tracking is optional enrichment; log instead of silently swallowing.
            utils.log("WRITER", f"Could not load character tracking: {e}")

    guidelines = get_style_guidelines()
    fw_list = '", "'.join(guidelines['filter_words'])

    prompt = f"""
You are an expert fiction writing AI. Your task is to rewrite a specific chapter based on a user directive.

INPUT DATA:
- TITLE: {meta.get('title')}
- GENRE: {meta.get('genre')}
- TONE: {meta.get('style', {}).get('tone')}
- AUTHOR_VOICE: {persona_info}
- PREVIOUS_CONTEXT: {prev_text}
- CURRENT_DRAFT: {target_chap.get('content', '')[:5000]}
- CHARACTERS: {json.dumps(bp.get('characters', []))}
{char_visuals}

PRIMARY DIRECTIVE (USER INSTRUCTION):
{instruction}

EXECUTION RULES:
1. CONTINUITY: The new text must flow logically from PREVIOUS_CONTEXT.
2. ADHERENCE: The PRIMARY DIRECTIVE overrides any conflicting details in CURRENT_DRAFT.
3. VOICE: Strictly emulate the AUTHOR_VOICE.
4. GENRE: Enforce {meta.get('genre')} conventions. No anachronisms.
5. LOGIC: Enforce strict causality (Action -> Reaction). No teleporting characters.

PROSE OPTIMIZATION RULES (STRICT ENFORCEMENT):
- FILTER_REMOVAL: Scan for words [{fw_list}]. If found, rewrite the sentence to remove the filter and describe the sensation directly.
- SENTENCE_VARIETY: Penalize consecutive sentences starting with the same pronoun or article. Vary structure.
- SHOW_DONT_TELL: Convert internal summaries of emotion into physical actions or subtextual dialogue.
- ACTIVE_VOICE: Convert passive voice ("was [verb]ed") to active voice.
- SENSORY_ANCHORING: The first paragraph must establish the setting using at least one non-visual sense (smell, sound, touch).
- SUBTEXT: Dialogue must imply meaning rather than stating it outright.

RETURN JSON:
{{
"content": "The full chapter text in Markdown...",
"summary": "A concise summary of the chapter's events and ending state (for continuity checks)."
}}
"""

    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        try:
            data = json.loads(utils.clean_json(response.text))
            return data.get('content'), data.get('summary')
        except Exception:
            # Model ignored the JSON contract; return raw text with no summary.
            return response.text, None
    except Exception as e:
        utils.log("WRITER", f"Rewrite failed: {e}")
        return None, None
|
||||
|
||||
|
||||
def check_and_propagate(bp, manuscript, changed_chap_num, folder, change_summary=None):
    """Propagate a chapter edit forward through later chapters.

    Walks the chapters after `changed_chap_num` in sort order and asks the
    writer model whether each one contradicts the change; contradicting
    chapters are rewritten in place. After two consecutive consistent
    chapters, a single long-range scan flags which remaining chapters to
    still check; unflagged chapters are skipped.

    Args:
        bp: Blueprint dict (currently unused here; kept for signature parity
            with sibling editor functions).
        manuscript: List of chapter dicts with 'num' and 'content'; mutated
            in place when rewrites happen.
        changed_chap_num: Chapter number whose edit triggers the ripple.
        folder: Project folder (usage logging + manuscript.json checkpoint).
        change_summary: Optional precomputed summary of the change; when
            absent, one is generated from the changed chapter's text.

    Returns:
        The (mutated) manuscript if any chapter was rewritten, else None.
    """
    utils.log("WRITER", f"Checking ripple effects from Ch {changed_chap_num}...")

    changed_chap = next((c for c in manuscript if c['num'] == changed_chap_num), None)
    if not changed_chap: return None

    # Establish the "change context" that later chapters are checked against.
    if change_summary:
        current_context = change_summary
    else:
        change_summary_prompt = f"""
ROLE: Summarizer
TASK: Summarize the key events and ending state of this chapter for continuity tracking.

TEXT:
{changed_chap.get('content', '')[:10000]}

FOCUS:
- Major plot points.
- Character status changes (injuries, items acquired, location changes).
- New information revealed.

OUTPUT: Concise text summary.
"""
        try:
            resp = ai_models.model_writer.generate_content(change_summary_prompt)
            utils.log_usage(folder, ai_models.model_writer.name, resp.usage_metadata)
            current_context = resp.text
        # NOTE(review): bare except — swallows even KeyboardInterrupt; the
        # fallback is the raw tail of the changed chapter.
        except:
            current_context = changed_chap.get('content', '')[-2000:]

    # Keep the original change context: the long-range scan below always uses
    # it, while `current_context` rolls forward chapter by chapter.
    original_change_context = current_context
    sorted_ms = sorted(manuscript, key=utils.chapter_sort_key)
    start_index = -1
    for i, c in enumerate(sorted_ms):
        if str(c['num']) == str(changed_chap_num):
            start_index = i
            break

    # Nothing to do if the chapter isn't found or is the last one.
    if start_index == -1 or start_index == len(sorted_ms) - 1:
        return None

    changes_made = False
    consecutive_no_changes = 0     # stop-counter: 2 consistent chapters triggers the scan
    potential_impact_chapters = [] # chapter numbers flagged by the long-range scan

    for i in range(start_index + 1, len(sorted_ms)):
        target_chap = sorted_ms[i]

        # After two consecutive consistent chapters the short-term ripple is
        # assumed dissipated; switch to scan-guided checking.
        if consecutive_no_changes >= 2:
            if target_chap['num'] not in potential_impact_chapters:
                # Only (re)run the scan if no already-flagged chapter lies ahead.
                future_flags = [n for n in potential_impact_chapters if isinstance(n, int) and isinstance(target_chap['num'], int) and n > target_chap['num']]

                if not future_flags:
                    remaining_chaps = sorted_ms[i:]
                    if not remaining_chaps: break

                    utils.log("WRITER", " -> Short-term ripple dissipated. Scanning remaining chapters for long-range impacts...")

                    chapter_summaries = []
                    for rc in remaining_chaps:
                        text = rc.get('content', '')
                        # Head/tail excerpt for long chapters to bound prompt size.
                        excerpt = text[:500] + "\n...\n" + text[-500:] if len(text) > 1000 else text
                        chapter_summaries.append(f"Ch {rc['num']}: {excerpt}")

                    scan_prompt = f"""
ROLE: Continuity Scanner
TASK: Identify chapters impacted by a change.

CHANGE_CONTEXT:
{original_change_context}

CHAPTER_SUMMARIES:
{json.dumps(chapter_summaries)}

CRITERIA: Identify later chapters that mention items, characters, or locations involved in the Change Context.

OUTPUT_FORMAT (JSON): [Chapter_Number_Int, ...]
"""

                    try:
                        resp = ai_models.model_logic.generate_content(scan_prompt)
                        utils.log_usage(folder, ai_models.model_logic.name, resp.usage_metadata)
                        potential_impact_chapters = json.loads(utils.clean_json(resp.text))
                        # Coerce to a clean list of ints; anything else is dropped.
                        if not isinstance(potential_impact_chapters, list): potential_impact_chapters = []
                        potential_impact_chapters = [int(x) for x in potential_impact_chapters if str(x).isdigit()]
                    except Exception as e:
                        utils.log("WRITER", f" -> Scan failed: {e}. Stopping.")
                        break

                    if not potential_impact_chapters:
                        utils.log("WRITER", " -> No long-range impacts detected. Stopping.")
                        break
                    else:
                        utils.log("WRITER", f" -> Detected potential impact in chapters: {potential_impact_chapters}")

            # In scan mode, only flagged (integer-numbered) chapters are checked.
            if isinstance(target_chap['num'], int) and target_chap['num'] not in potential_impact_chapters:
                utils.log("WRITER", f" -> Skipping Ch {target_chap['num']} (Not flagged).")
                continue

        utils.log("WRITER", f" -> Checking Ch {target_chap['num']} for continuity...")

        # Used to pin the rewrite to roughly the original length.
        chap_word_count = len(target_chap.get('content', '').split())
        prompt = f"""
ROLE: Continuity Checker
TASK: Determine if a chapter contradicts a story change. If it does, rewrite it to fix the contradiction.

CHANGED_CHAPTER: {changed_chap_num}
CHANGE_SUMMARY: {current_context}

CHAPTER_TO_CHECK (Ch {target_chap['num']}):
{target_chap['content'][:12000]}

DECISION_LOGIC:
- If the chapter directly contradicts the change (references dead characters, items that no longer exist, events that didn't happen), status = REWRITE.
- If the chapter is consistent or only tangentially related, status = NO_CHANGE.
- Be conservative — only rewrite if there is a genuine contradiction.

REWRITE_RULES (apply only if REWRITE):
- Fix the specific contradiction. Preserve all other content.
- The rewritten chapter MUST be approximately {chap_word_count} words (same length as original).
- Include the chapter header formatted as Markdown H1.
- Do not add new plot points not in the original.

OUTPUT_FORMAT (JSON):
{{
"status": "NO_CHANGE" or "REWRITE",
"reason": "Brief explanation of the contradiction or why it's consistent",
"content": "Full Markdown rewritten chapter (ONLY if status is REWRITE, otherwise null)"
}}
"""

        try:
            response = ai_models.model_writer.generate_content(prompt)
            utils.log_usage(folder, ai_models.model_writer.name, response.usage_metadata)
            data = json.loads(utils.clean_json(response.text))

            if data.get('status') == 'NO_CHANGE':
                utils.log("WRITER", f" -> Ch {target_chap['num']} is consistent.")
                # Roll the context forward so the next check sees this chapter's tail.
                current_context = f"Ch {target_chap['num']} Summary: " + target_chap.get('content', '')[-2000:]
                consecutive_no_changes += 1
            elif data.get('status') == 'REWRITE' and data.get('content'):
                new_text = data.get('content')
                if new_text:
                    utils.log("WRITER", f" -> Rewriting Ch {target_chap['num']} to fix continuity.")
                    target_chap['content'] = new_text
                    changes_made = True
                    current_context = f"Ch {target_chap['num']} Summary: " + new_text[-2000:]
                    # A rewrite restarts the "ripple dissipated" counter.
                    consecutive_no_changes = 0

                    # Best-effort checkpoint of the mutated manuscript to disk.
                    try:
                        with open(os.path.join(folder, "manuscript.json"), 'w') as f: json.dump(manuscript, f, indent=2)
                    # NOTE(review): bare except — disk errors are deliberately ignored here.
                    except: pass

        except Exception as e:
            # One failed check does not stop propagation to later chapters.
            utils.log("WRITER", f" -> Check failed: {e}")

    return manuscript if changes_made else None
|
||||
265
story/planner.py
Normal file
265
story/planner.py
Normal file
@@ -0,0 +1,265 @@
|
||||
import json
|
||||
import random
|
||||
from core import utils
|
||||
from ai import models as ai_models
|
||||
from story.bible_tracker import filter_characters
|
||||
|
||||
|
||||
def enrich(bp, folder, context=""):
|
||||
utils.log("ENRICHER", "Fleshing out details from description...")
|
||||
|
||||
if 'book_metadata' not in bp: bp['book_metadata'] = {}
|
||||
if 'characters' not in bp: bp['characters'] = []
|
||||
if 'plot_beats' not in bp: bp['plot_beats'] = []
|
||||
|
||||
prompt = f"""
|
||||
ROLE: Creative Director
|
||||
TASK: Create a comprehensive Book Bible from the user description.
|
||||
|
||||
INPUT DATA:
|
||||
- USER_DESCRIPTION: "{bp.get('manual_instruction', 'A generic story')}"
|
||||
- CONTEXT (Sequel): {context}
|
||||
|
||||
STEPS:
|
||||
1. Generate a catchy Title.
|
||||
2. Define the Genre and Tone.
|
||||
3. Determine the Time Period (e.g. "Modern", "1920s", "Sci-Fi Future").
|
||||
4. Define Formatting Rules for text messages, thoughts, and chapter headers.
|
||||
5. Create Protagonist and Antagonist/Love Interest.
|
||||
- Logic: If sequel, reuse context. If new, create.
|
||||
6. Outline 5-7 core Plot Beats.
|
||||
7. Define a 'structure_prompt' describing the narrative arc (e.g. "Hero's Journey", "3-Act Structure", "Detective Procedural").
|
||||
|
||||
OUTPUT_FORMAT (JSON):
|
||||
{{
|
||||
"book_metadata": {{ "title": "Book Title", "genre": "Genre", "content_warnings": ["Violence", "Major Character Death"], "structure_prompt": "...", "style": {{ "tone": "Tone", "time_period": "Modern", "formatting_rules": ["Chapter Headers: Number + Title", "Text Messages: Italic", "Thoughts: Italic"] }} }},
|
||||
"characters": [ {{ "name": "John Doe", "role": "Protagonist", "description": "Description", "key_events": ["Planned injury in Act 2"] }} ],
|
||||
"plot_beats": [ "Beat 1", "Beat 2", "..." ]
|
||||
}}
|
||||
"""
|
||||
try:
|
||||
response = ai_models.model_logic.generate_content(prompt)
|
||||
utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
|
||||
ai_data = json.loads(utils.clean_json(response.text))
|
||||
|
||||
if 'book_metadata' not in bp: bp['book_metadata'] = {}
|
||||
|
||||
if 'title' not in bp['book_metadata']:
|
||||
bp['book_metadata']['title'] = ai_data.get('book_metadata', {}).get('title')
|
||||
if 'structure_prompt' not in bp['book_metadata']:
|
||||
bp['book_metadata']['structure_prompt'] = ai_data.get('book_metadata', {}).get('structure_prompt')
|
||||
if 'content_warnings' not in bp['book_metadata']:
|
||||
bp['book_metadata']['content_warnings'] = ai_data.get('book_metadata', {}).get('content_warnings', [])
|
||||
|
||||
if 'style' not in bp['book_metadata']: bp['book_metadata']['style'] = {}
|
||||
|
||||
source_style = ai_data.get('book_metadata', {}).get('style', {})
|
||||
for k, v in source_style.items():
|
||||
if k not in bp['book_metadata']['style']:
|
||||
bp['book_metadata']['style'][k] = v
|
||||
|
||||
if 'characters' not in bp or not bp['characters']:
|
||||
bp['characters'] = ai_data.get('characters', [])
|
||||
|
||||
if 'characters' in bp:
|
||||
bp['characters'] = filter_characters(bp['characters'])
|
||||
|
||||
if 'plot_beats' not in bp or not bp['plot_beats']:
|
||||
bp['plot_beats'] = ai_data.get('plot_beats', [])
|
||||
|
||||
return bp
|
||||
except Exception as e:
|
||||
utils.log("ENRICHER", f"Enrichment failed: {e}")
|
||||
return bp
|
||||
|
||||
|
||||
def plan_structure(bp, folder):
    """Generate the structural event outline for a book.

    Chooses a narrative structure (explicit 'structure_prompt' if present,
    otherwise a default keyed by the length label) and asks the logic model
    for a list of outline events sized to the target chapter count.

    Args:
        bp: Blueprint dict ('book_metadata', 'length_settings', 'characters',
            'plot_beats').
        folder: Project folder for usage logging.

    Returns:
        List of event dicts ({"description", "purpose"}); empty list on
        any failure.
    """
    utils.log("ARCHITECT", "Creating structure...")

    structure_type = bp.get('book_metadata', {}).get('structure_prompt')

    if not structure_type:
        # Default structure template per length class.
        label = bp.get('length_settings', {}).get('label', 'Novel')
        structures = {
            "Chapter Book": "Create a simple episodic structure with clear chapter hooks.",
            "Young Adult": "Create a character-driven arc with high emotional stakes and a clear 'Coming of Age' theme.",
            "Flash Fiction": "Create a single, impactful scene structure with a twist.",
            "Short Story": "Create a concise narrative arc (Inciting Incident -> Rising Action -> Climax -> Resolution).",
            "Novella": "Create a standard 3-Act Structure.",
            "Novel": "Create a detailed 3-Act Structure with A and B plots.",
            "Epic": "Create a complex, multi-arc structure (Hero's Journey) with extensive world-building events."
        }
        structure_type = structures.get(label, "Create a 3-Act Structure.")

    beats_context = bp.get('plot_beats', [])
    target_chapters = bp.get('length_settings', {}).get('chapters', 'flexible')
    target_words = bp.get('length_settings', {}).get('words', 'flexible')
    chars_summary = [{"name": c.get("name"), "role": c.get("role")} for c in bp.get('characters', [])]

    prompt = f"""
ROLE: Story Architect
TASK: Create a detailed structural event outline for a {target_chapters}-chapter book.

BOOK:
- TITLE: {bp.get('book_metadata', {}).get('title')}
- GENRE: {bp.get('book_metadata', {}).get('genre', 'Fiction')}
- TARGET_CHAPTERS: {target_chapters}
- TARGET_WORDS: {target_words}
- STRUCTURE: {structure_type}

CHARACTERS: {json.dumps(chars_summary)}

USER_BEATS (must all be preserved and woven into the outline):
{json.dumps(beats_context)}

REQUIREMENTS:
- Produce enough events to fill approximately {target_chapters} chapters.
- Each event must serve a narrative purpose (setup, escalation, reversal, climax, resolution).
- Distribute events across a beginning, middle, and end — avoid front-loading.
- Character arcs must be visible through the events (growth, change, revelation).

OUTPUT_FORMAT (JSON): {{ "events": [{{ "description": "String", "purpose": "String" }}] }}
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        return json.loads(utils.clean_json(response.text))['events']
    except Exception:
        # Narrowed from a bare `except:`; planning degrades to an empty outline.
        return []
|
||||
|
||||
|
||||
def expand(events, pass_num, target_chapters, bp, folder):
    """Run one outline-expansion pass.

    Below the beat ceiling (1.5x the chapter target) the model is asked to add
    intermediate events; at or above it, the pass enriches existing beats
    instead. Returns the revised event list, or the input list unchanged if
    the model call or JSON parse fails.
    """
    utils.log("ARCHITECT", f"Expansion pass {pass_num} | Current Beats: {len(events)} | Target Chaps: {target_chapters}")

    beat_count = len(events)
    ceiling = int(target_chapters * 1.5)

    # Choose the pass's goal: grow the outline, or deepen what is already there.
    if beat_count >= ceiling:
        task = (
            f"The outline already has {beat_count} beats for a {target_chapters}-chapter book — do NOT add more events. "
            f"Instead, enrich each existing beat's description with more specific detail: setting, characters involved, emotional stakes, and how it connects to what follows."
        )
    else:
        task = (
            f"Expand the outline toward {target_chapters} chapters. "
            f"Current count: {beat_count} beats. "
            f"Add intermediate events to fill pacing gaps, deepen subplots, and ensure character arcs are visible. "
            f"Do not overshoot — aim for {target_chapters} to {ceiling} total events."
        )

    user_beats = bp.get('plot_beats', [])

    prompt = f"""
ROLE: Story Architect
TASK: {task}

ORIGINAL_USER_BEATS (must all remain present):
{json.dumps(user_beats)}

CURRENT_EVENTS:
{json.dumps(events)}

RULES:
1. PRESERVE all original user beats — do not remove or alter them.
2. New events must serve a clear narrative purpose (tension, character, world, reversal).
3. Avoid repetitive events — each beat must be distinct.
4. Distribute additions evenly — do not front-load the outline.

OUTPUT_FORMAT (JSON): {{ "events": [{{"description": "String", "purpose": "String"}}] }}
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        revised = json.loads(utils.clean_json(response.text))['events']

        # Report what this pass actually changed: new beats, richer text, or nothing.
        added = len(revised) - beat_count
        growth = len(str(revised)) - len(str(events))
        if added > 0:
            utils.log("ARCHITECT", f" -> Added {added} new beats.")
        elif growth > 20:
            utils.log("ARCHITECT", f" -> Fleshed out descriptions (Text grew by {growth} chars).")
        else:
            utils.log("ARCHITECT", " -> No significant changes.")
        return revised
    except Exception as e:
        # Best effort: a failed pass keeps the previous outline intact.
        utils.log("ARCHITECT", f" -> Pass skipped due to error: {e}")
        return events
|
||||
|
||||
|
||||
def _parse_word_target(words):
    """Parse a word-target spec ('80k', '60,000-90,000', '50000+', 'Flexible') into an int.

    Ranges resolve to their midpoint. Returns 0 when the value cannot be
    parsed (e.g. 'Flexible'), which callers treat as "no scaling".
    """
    s = str(words).lower().replace(',', '').replace('k', '000').replace('+', '').replace(' ', '')
    try:
        if '-' in s:
            lo, hi = s.split('-')[:2]
            return (int(lo) + int(hi)) // 2
        return int(s)
    except ValueError:
        # Was two bare `except: pass` blocks — narrowed to the actual failure mode.
        return 0


def create_chapter_plan(events, bp, folder):
    """Group outline events into a chapter plan and scale word counts by pacing.

    Asks the model for a chapter grouping, then rescales each chapter's
    'estimated_words' in two passes so that (a) the book total matches the
    (slightly randomised) word target and (b) pacing labels skew length —
    fast chapters shorter, slow chapters longer — with a 300-word floor.
    Returns the plan list, or [] on any failure.
    """
    import random  # function-local: `random` is not visibly imported at module level

    utils.log("ARCHITECT", "Finalizing Chapters...")
    target = bp['length_settings']['chapters']
    words = bp['length_settings'].get('words', 'Flexible')

    include_prologue = bp.get('length_settings', {}).get('include_prologue', False)
    include_epilogue = bp.get('length_settings', {}).get('include_epilogue', False)

    structure_instructions = ""
    if include_prologue:
        structure_instructions += "- Include a 'Prologue' (chapter_number: 0) to set the scene.\n"
    if include_epilogue:
        structure_instructions += "- Include an 'Epilogue' (chapter_number: 'Epilogue') to wrap up.\n"

    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})
    pov_chars = style.get('pov_characters', [])
    pov_instruction = ""
    if pov_chars:
        pov_instruction = f"- Assign a 'pov_character' for each chapter from this list: {json.dumps(pov_chars)}."

    prompt = f"""
ROLE: Pacing Specialist
TASK: Group the provided events into chapters for a {meta.get('genre', 'Fiction')} {bp['length_settings'].get('label', 'novel')}.

GUIDELINES:
- AIM for approximately {target} chapters, but the final count may vary ±15% if the story structure demands it.
- TARGET_WORDS for the whole book: {words}
- Assign pacing to each chapter: Very Fast / Fast / Standard / Slow / Very Slow
- estimated_words per chapter should reflect its pacing:
  Very Fast ≈ 60% of average, Fast ≈ 80%, Standard ≈ 100%, Slow ≈ 125%, Very Slow ≈ 150%
- Do NOT force equal word counts. Natural variation makes the book feel alive.
{structure_instructions}
{pov_instruction}

INPUT_EVENTS: {json.dumps(events)}

OUTPUT_FORMAT (JSON): [{{"chapter_number": 1, "title": "String", "pov_character": "String", "pacing": "String", "estimated_words": 2000, "beats": ["String"]}}]
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        plan = json.loads(utils.clean_json(response.text))

        target_val = _parse_word_target(words)

        if target_val > 0:
            # Small random variance so repeated runs don't produce identical totals.
            variance = random.uniform(0.92, 1.08)
            target_val = int(target_val * variance)
            utils.log("ARCHITECT", f"Word target after variance ({variance:.2f}x): {target_val} words.")

            current_sum = sum(int(c.get('estimated_words', 0)) for c in plan)
            if current_sum > 0:
                base_factor = target_val / current_sum
                pacing_weight = {
                    'very fast': 0.60, 'fast': 0.80, 'standard': 1.00,
                    'slow': 1.25, 'very slow': 1.50
                }
                # Pass 1: scale toward the target and skew by pacing (floor 300 words).
                for c in plan:
                    pw = pacing_weight.get(c.get('pacing', 'standard').lower(), 1.0)
                    c['estimated_words'] = max(300, int(c.get('estimated_words', 0) * base_factor * pw))

                # Pass 2: renormalise so the pacing skew doesn't move the book total.
                adjusted_sum = sum(c['estimated_words'] for c in plan)
                if adjusted_sum > 0:
                    norm = target_val / adjusted_sum
                    for c in plan:
                        c['estimated_words'] = max(300, int(c['estimated_words'] * norm))

                utils.log("ARCHITECT", f"Chapter lengths scaled by pacing. Total ≈ {sum(c['estimated_words'] for c in plan)} words across {len(plan)} chapters.")

        return plan
    except Exception as e:
        utils.log("ARCHITECT", f"Failed to create chapter plan: {e}")
        return []
|
||||
180
story/style_persona.py
Normal file
180
story/style_persona.py
Normal file
@@ -0,0 +1,180 @@
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from core import config, utils
|
||||
from ai import models as ai_models
|
||||
|
||||
|
||||
def get_style_guidelines():
    """Return the banned-phrase style guidelines, preferring user overrides on disk.

    Loads DATA_DIR/style_guidelines.json when it exists; otherwise writes the
    built-in defaults there (best effort) so users have a file to edit.
    Always returns a dict with 'ai_isms' and 'filter_words' lists.
    """
    defaults = {
        "ai_isms": [
            'testament to', 'tapestry', 'shiver down spine', 'unspoken agreement',
            'palpable tension', 'a sense of', 'suddenly', 'in that moment',
            'symphony of', 'dance of', 'azure', 'cerulean'
        ],
        "filter_words": [
            'felt', 'saw', 'heard', 'realized', 'decided', 'noticed', 'knew', 'thought'
        ]
    }
    path = os.path.join(config.DATA_DIR, "style_guidelines.json")
    if os.path.exists(path):
        try:
            user_data = utils.load_json(path)
            if user_data:
                if 'ai_isms' in user_data:
                    defaults['ai_isms'] = user_data['ai_isms']
                if 'filter_words' in user_data:
                    defaults['filter_words'] = user_data['filter_words']
        except Exception:
            # Unreadable/corrupt override file: fall back to the defaults.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
            pass
    else:
        try:
            with open(path, 'w') as f:
                json.dump(defaults, f, indent=2)
        except OSError:
            # Data dir missing or read-only — the in-memory defaults still work.
            pass
    return defaults
|
||||
|
||||
|
||||
def refresh_style_guidelines(model, folder=None):
    """Ask the model to revise the banned-word lists and persist them on success.

    Returns the refreshed guidelines dict, or the existing guidelines when the
    model call fails or returns an incomplete payload.
    """
    utils.log("SYSTEM", "Refreshing Style Guidelines via AI...")
    existing = get_style_guidelines()

    prompt = f"""
ROLE: Literary Editor
TASK: Update 'Banned Words' lists for AI writing.

INPUT_DATA:
- CURRENT_AI_ISMS: {json.dumps(existing.get('ai_isms', []))}
- CURRENT_FILTER_WORDS: {json.dumps(existing.get('filter_words', []))}

INSTRUCTIONS:
1. Review lists. Remove false positives.
2. Add new common AI tropes (e.g. 'neon-lit', 'bustling', 'a sense of', 'mined', 'delved').
3. Ensure robustness.

OUTPUT_FORMAT (JSON): {{ "ai_isms": [strings], "filter_words": [strings] }}
"""
    try:
        response = model.generate_content(prompt)
        model_name = getattr(model, 'name', ai_models.logic_model_name)
        if folder:
            utils.log_usage(folder, model_name, response.usage_metadata)
        refreshed = json.loads(utils.clean_json(response.text))

        # Persist only a complete payload; otherwise fall through to the old lists.
        if 'ai_isms' in refreshed and 'filter_words' in refreshed:
            path = os.path.join(config.DATA_DIR, "style_guidelines.json")
            with open(path, 'w') as f:
                json.dump(refreshed, f, indent=2)
            utils.log("SYSTEM", "Style Guidelines updated.")
            return refreshed
    except Exception as e:
        utils.log("SYSTEM", f"Failed to refresh guidelines: {e}")
    return existing
|
||||
|
||||
|
||||
def create_initial_persona(bp, folder):
    """Generate a fictional author persona matching the book's genre, tone, and audience.

    Returns the persona dict from the model, or a neutral fallback persona on
    any failure so generation can continue.
    """
    utils.log("SYSTEM", "Generating initial Author Persona based on genre/tone...")
    book_meta = bp.get('book_metadata', {})
    style_cfg = book_meta.get('style', {})

    prompt = f"""
ROLE: Creative Director
TASK: Create a fictional 'Author Persona'.

METADATA:
- TITLE: {book_meta.get('title')}
- GENRE: {book_meta.get('genre')}
- TONE: {style_cfg.get('tone')}
- AUDIENCE: {book_meta.get('target_audience')}

OUTPUT_FORMAT (JSON): {{ "name": "Pen Name", "bio": "Description of writing style (voice, sentence structure, vocabulary)...", "age": "...", "gender": "..." }}
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        # Neutral fallback keeps downstream persona consumers working.
        utils.log("SYSTEM", f"Persona generation failed: {e}")
        return {"name": "AI Author", "bio": "Standard, balanced writing style."}
|
||||
|
||||
|
||||
def refine_persona(bp, text, folder):
    """Refine the author persona bio so future chapters match the style of *text*.

    Mutates bp's author_details dict in place (sets 'bio') and returns it.
    On failure the existing details are returned unchanged, but the error is
    now logged instead of being silently swallowed by a bare `except:`.
    """
    utils.log("SYSTEM", "Refining Author Persona based on recent chapters...")
    ad = bp.get('book_metadata', {}).get('author_details', {})
    current_bio = ad.get('bio', 'Standard style.')

    prompt = f"""
ROLE: Literary Stylist
TASK: Refine Author Bio based on text sample.

INPUT_DATA:
- TEXT_SAMPLE: {text[:3000]}
- CURRENT_BIO: {current_bio}

GOAL: Ensure future chapters sound exactly like the sample. Highlight quirks, patterns, vocabulary.

OUTPUT_FORMAT (JSON): {{ "bio": "Updated bio..." }}
"""
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        new_bio = json.loads(utils.clean_json(response.text)).get('bio')
        if new_bio:
            ad['bio'] = new_bio
            utils.log("SYSTEM", " -> Persona bio updated.")
        return ad
    except Exception as e:
        # Best effort: keep the current bio, but surface the failure in the log.
        utils.log("SYSTEM", f" -> Persona refinement failed, keeping current bio: {e}")
    return ad
|
||||
|
||||
|
||||
def update_persona_sample(bp, folder):
    """Extract a style sample from the finished manuscript into the persona library.

    Saves the first 3000 chars of the manuscript as a sample file under
    PERSONAS_DIR, then creates or updates the persona entry for the book's
    author in PERSONAS_FILE. Silently returns when the manuscript is missing,
    empty, or under 500 chars.
    """
    utils.log("SYSTEM", "Extracting author persona from manuscript...")

    ms_path = os.path.join(folder, "manuscript.json")
    if not os.path.exists(ms_path): return
    ms = utils.load_json(ms_path)
    if not ms: return

    full_text = "\n".join([c.get('content', '') for c in ms])
    if len(full_text) < 500: return  # too little text to characterise a style

    # exist_ok avoids the check-then-create race of the old exists()+makedirs pair.
    os.makedirs(config.PERSONAS_DIR, exist_ok=True)

    meta = bp.get('book_metadata', {})
    safe_title = utils.sanitize_filename(meta.get('title', 'book'))[:20]
    timestamp = int(time.time())
    filename = f"sample_{safe_title}_{timestamp}.txt"
    filepath = os.path.join(config.PERSONAS_DIR, filename)

    sample_text = full_text[:3000]
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(sample_text)

    author_name = meta.get('author', 'Unknown Author')

    # Load the persona registry; a corrupt/unreadable file means starting fresh.
    personas = {}
    if os.path.exists(config.PERSONAS_FILE):
        try:
            with open(config.PERSONAS_FILE, 'r') as f:
                personas = json.load(f)
        except (OSError, ValueError):
            # Was a bare `except:` — narrowed to I/O and JSON-decode failures.
            pass

    if author_name not in personas:
        utils.log("SYSTEM", f"Generating new persona profile for '{author_name}'...")
        prompt = f"""
ROLE: Literary Analyst
TASK: Analyze writing style (Tone, Voice, Vocabulary).
TEXT: {sample_text[:1000]}
OUTPUT: 1-sentence author bio.
"""
        try:
            response = ai_models.model_logic.generate_content(prompt)
            utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
            bio = response.text.strip()
        except Exception:
            # Bio is decorative — keep going with a placeholder on model failure.
            bio = "Style analysis unavailable."

        personas[author_name] = {
            "name": author_name,
            "bio": bio,
            "sample_files": [filename],
            "sample_text": sample_text[:500]
        }
    else:
        utils.log("SYSTEM", f"Updating persona '{author_name}' with new sample.")
        personas[author_name].setdefault('sample_files', [])
        if filename not in personas[author_name]['sample_files']:
            personas[author_name]['sample_files'].append(filename)

    with open(config.PERSONAS_FILE, 'w') as f:
        json.dump(personas, f, indent=2)
|
||||
278
story/writer.py
Normal file
278
story/writer.py
Normal file
@@ -0,0 +1,278 @@
|
||||
import json
|
||||
import os
|
||||
from core import config, utils
|
||||
from ai import models as ai_models
|
||||
from story.style_persona import get_style_guidelines
|
||||
from story.editor import evaluate_chapter_quality
|
||||
|
||||
|
||||
def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None, next_chapter_hint=""):
    """Draft a chapter, then iteratively evaluate and rewrite/refine it until quality passes.

    Flow: build a large drafting prompt (persona, style, continuity tracking,
    beats), generate a first draft, then loop up to 5 attempts: score via
    evaluate_chapter_quality(); accept immediately at score >= 8; full-rewrite
    from scratch below 6; otherwise refine the draft against the critique.
    The best-scoring text seen is returned when attempts run out.

    Args:
        chap: Chapter plan dict — reads 'chapter_number', 'title', 'beats', and
            optionally 'pacing', 'estimated_words', 'pov_character'.
        bp: Book blueprint — requires 'length_settings'; 'book_metadata' and
            'characters' are read with defaults.
        folder: Project folder path, used for token-usage logging.
        prev_sum: Story-so-far summary injected into the prompt as context.
        tracking: Optional continuity tracker; its 'characters' entries are
            rendered into the prompt (descriptors, speech style, clothing, events).
        prev_content: Optional previous chapter text; its last 3000 chars are
            included for tone/continuity.
        next_chapter_hint: Optional teaser folded into the chapter-hook rule.

    Returns:
        Chapter text in Markdown, or a '## Chapter N Failed' stub if even the
        initial draft call raises.
    """
    pacing = chap.get('pacing', 'Standard')
    est_words = chap.get('estimated_words', 'Flexible')
    utils.log("WRITER", f"Drafting Ch {chap['chapter_number']} ({pacing} | ~{est_words} words): {chap['title']}")
    ls = bp['length_settings']
    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})
    genre = meta.get('genre', 'Fiction')

    pov_char = chap.get('pov_character', '')

    # --- Author persona: prefer structured author_details; fall back to legacy 'author_bio'.
    ad = meta.get('author_details', {})
    if not ad and 'author_bio' in meta:
        persona_info = meta['author_bio']
    else:
        persona_info = f"Name: {ad.get('name', meta.get('author', 'Unknown'))}\n"
        if ad.get('age'): persona_info += f"Age: {ad['age']}\n"
        if ad.get('gender'): persona_info += f"Gender: {ad['gender']}\n"
        if ad.get('race'): persona_info += f"Race: {ad['race']}\n"
        if ad.get('nationality'): persona_info += f"Nationality: {ad['nationality']}\n"
        if ad.get('language'): persona_info += f"Language: {ad['language']}\n"
        if ad.get('bio'): persona_info += f"Style/Bio: {ad['bio']}\n"

    # Collect writing-style samples: inline sample text plus any sample files on disk.
    samples = []
    if ad.get('sample_text'):
        samples.append(f"--- SAMPLE PARAGRAPH ---\n{ad['sample_text']}")

    if ad.get('sample_files'):
        for fname in ad['sample_files']:
            fpath = os.path.join(config.PERSONAS_DIR, fname)
            if os.path.exists(fpath):
                try:
                    with open(fpath, 'r', encoding='utf-8', errors='ignore') as f:
                        content = f.read(3000)  # cap per-file sample to keep the prompt bounded
                    samples.append(f"--- SAMPLE FROM {fname} ---\n{content}...")
                # NOTE(review): bare except silently skips unreadable sample files.
                except: pass

    if samples:
        persona_info += "\nWRITING STYLE SAMPLES:\n" + "\n".join(samples)

    # --- Continuity tracking: render per-character visuals/speech/events for the prompt.
    char_visuals = ""
    if tracking and 'characters' in tracking:
        char_visuals = "\nCHARACTER TRACKING (Visuals & Preferences):\n"
        for name, data in tracking['characters'].items():
            desc = ", ".join(data.get('descriptors', []))
            likes = ", ".join(data.get('likes_dislikes', []))
            speech = data.get('speech_style', 'Unknown')
            worn = data.get('last_worn', 'Unknown')
            char_visuals += f"- {name}: {desc}\n * Speech: {speech}\n * Likes/Dislikes: {likes}\n"

            major = data.get('major_events', [])
            if major: char_visuals += f" * Major Events: {'; '.join(major)}\n"

            if worn and worn != 'Unknown':
                char_visuals += f" * Last Worn: {worn} (NOTE: Only relevant if scene is continuous from previous chapter)\n"

    # --- Style guide: flatten scalar style settings, then append list-valued extras.
    style_block = "\n".join([f"- {k.replace('_', ' ').title()}: {v}" for k, v in style.items() if isinstance(v, (str, int, float))])
    if 'tropes' in style and isinstance(style['tropes'], list):
        style_block += f"\n- Tropes: {', '.join(style['tropes'])}"

    if 'formatting_rules' in style and isinstance(style['formatting_rules'], list):
        style_block += "\n- Formatting Rules:\n * " + "\n * ".join(style['formatting_rules'])

    # Previous chapter tail (last 3000 chars) carried forward for tone/continuity.
    prev_context_block = ""
    if prev_content:
        trunc_content = prev_content[-3000:] if len(prev_content) > 3000 else prev_content
        prev_context_block = f"\nPREVIOUS CHAPTER TEXT (For Tone & Continuity):\n{trunc_content}\n"

    # Slim character sheet for the writer prompt (name/role/description only).
    chars_for_writer = [
        {"name": c.get("name"), "role": c.get("role"), "description": c.get("description", "")}
        for c in bp.get('characters', [])
    ]

    total_chapters = ls.get('chapters', '?')
    # Main drafting prompt: metadata, pacing guide, style, persona, craft rules, context.
    prompt = f"""
ROLE: Fiction Writer
TASK: Write Chapter {chap['chapter_number']}: {chap['title']}

METADATA:
- GENRE: {genre}
- FORMAT: {ls.get('label', 'Story')}
- POSITION: Chapter {chap['chapter_number']} of {total_chapters} — calibrate narrative tension accordingly (early = setup/intrigue, middle = escalation, final third = payoff/climax)
- PACING: {pacing} — see PACING_GUIDE below
- TARGET_WORDS: ~{est_words} (write to this length; do not summarise to save space)
- POV: {pov_char if pov_char else 'Protagonist'}

PACING_GUIDE:
- 'Very Fast': Pure action/dialogue. Minimal description. Short punchy paragraphs.
- 'Fast': Keep momentum. No lingering. Cut to the next beat quickly.
- 'Standard': Balanced dialogue and description. Standard paragraph lengths.
- 'Slow': Detailed, atmospheric. Linger on emotion and environment.
- 'Very Slow': Deep introspection. Heavy sensory immersion. Slow burn tension.

STYLE_GUIDE:
{style_block}

AUTHOR_VOICE:
{persona_info}

INSTRUCTIONS:
- Start with the Chapter Header formatted as Markdown H1 (e.g. '# Chapter X: Title'). Follow the 'Formatting Rules' for the header style.

- SENSORY ANCHORING: Start scenes by establishing Who, Where, and When immediately.
- DEEP POV: Immerse the reader in the POV character's immediate experience. Filter descriptions through their specific worldview and emotional state.
- SHOW, DON'T TELL: Focus on immediate action and internal reaction. Don't summarize feelings; show the physical manifestation of them.
- CAUSALITY: Ensure events follow a "Because of X, Y happened" logic, not just "And then X, and then Y".
- STAGING: When characters enter, describe their entrance. Don't let them just "appear" in dialogue.
- SENSORY DETAILS: Use specific sensory details sparingly to ground the scene. Avoid stacking adjectives (e.g. "crisp white blouses, sharp legal briefs").
- ACTIVE VOICE: Use active voice. Subject -> Verb -> Object. Avoid "was/were" constructions.
- STRONG VERBS: Delete adverbs. Use specific verbs (e.g. "trudged" instead of "walked slowly").
- NO INFO-DUMPS: Weave backstory into dialogue or action. Do not stop the story to explain history.
- AVOID CLICHÉS: Avoid common AI tropes (e.g., 'shiver down spine', 'palpable tension', 'unspoken agreement', 'testament to', 'tapestry of', 'azure', 'cerulean').
- MAINTAIN CONTINUITY: Pay close attention to the PREVIOUS CONTEXT. Characters must NOT know things that haven't happened yet or haven't been revealed to them.
- CHARACTER INTERACTIONS: If characters are meeting for the first time in the summary, treat them as strangers.
- SENTENCE VARIETY: Avoid repetitive sentence structures (e.g. starting multiple sentences with "He" or "She"). Vary sentence length to create rhythm.
- GENRE CONSISTENCY: Ensure all introductions of characters, places, items, or actions are strictly appropriate for the {genre} genre. Avoid anachronisms or tonal clashes.
- DIALOGUE VOICE: Every character speaks with their own distinct voice (see CHARACTER TRACKING for speech styles). No two characters may sound the same. Vary sentence length, vocabulary, and register per character.
- CHAPTER HOOK: End this chapter with unresolved tension — a decision pending, a threat imminent, or a question unanswered.{f" Seed subtle anticipation for the next scene: '{next_chapter_hint}'." if next_chapter_hint else " Do not neatly resolve all threads."}

QUALITY_CRITERIA:
1. ENGAGEMENT & TENSION: Grip the reader. Ensure conflict/tension in every scene.
2. SCENE EXECUTION: Flesh out the middle. Avoid summarizing key moments.
3. VOICE & TONE: Distinct narrative voice matching the genre.
4. SENSORY IMMERSION: Engage all five senses.
5. SHOW, DON'T TELL: Show emotions through physical reactions and subtext.
6. CHARACTER AGENCY: Characters must drive the plot through active choices.
7. PACING: Avoid rushing. Ensure the ending lands with impact.
8. GENRE APPROPRIATENESS: Introductions of characters, places, items, or actions must be consistent with {genre} conventions.
9. DIALOGUE AUTHENTICITY: Characters must sound distinct. Use subtext. Avoid "on-the-nose" dialogue.
10. PLOT RELEVANCE: Every scene must advance the plot or character arcs. No filler.
11. STAGING & FLOW: Characters must enter and exit physically. Paragraphs must transition logically.
12. PROSE DYNAMICS: Vary sentence length. Use strong verbs. Avoid passive voice.
13. CLARITY: Ensure sentences are clear and readable. Avoid convoluted phrasing.

CONTEXT:
- STORY_SO_FAR: {prev_sum}
{prev_context_block}
- CHARACTERS: {json.dumps(chars_for_writer)}
{char_visuals}
- SCENE_BEATS: {json.dumps(chap['beats'])}

OUTPUT: Markdown text.
"""
    # --- Initial draft. A failure here is fatal for the chapter (stub returned).
    current_text = ""
    try:
        resp_draft = ai_models.model_writer.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_writer.name, resp_draft.usage_metadata)
        current_text = resp_draft.text
        draft_words = len(current_text.split()) if current_text else 0
        utils.log("WRITER", f" -> Draft: {draft_words:,} words (target: ~{est_words})")
    except Exception as e:
        utils.log("WRITER", f"⚠️ Failed Ch {chap['chapter_number']}: {e}")
        return f"## Chapter {chap['chapter_number']} Failed\n\nError: {e}"

    # Quality thresholds: >=8 accept immediately, >=7 acceptable at attempt cap,
    # <6 triggers a from-scratch rewrite instead of a refinement pass.
    max_attempts = 5
    SCORE_AUTO_ACCEPT = 8
    SCORE_PASSING = 7
    SCORE_REWRITE_THRESHOLD = 6

    best_score = 0
    best_text = current_text
    past_critiques = []

    for attempt in range(1, max_attempts + 1):
        utils.log("WRITER", f" -> Evaluating Ch {chap['chapter_number']} (Attempt {attempt}/{max_attempts})...")
        score, critique = evaluate_chapter_quality(current_text, chap['title'], meta.get('genre', 'Fiction'), ai_models.model_writer, folder)

        past_critiques.append(f"Attempt {attempt}: {critique}")

        # Evaluator itself failed — keep whatever we have rather than looping blind.
        if "Evaluation error" in critique:
            utils.log("WRITER", f" ⚠️ {critique}. Keeping current draft.")
            if best_score == 0: best_text = current_text
            break

        utils.log("WRITER", f" Score: {score}/10. Critique: {critique}")

        if score >= SCORE_AUTO_ACCEPT:
            utils.log("WRITER", " 🌟 Auto-Accept threshold met.")
            return current_text

        # Track the best draft so a later, worse rewrite can't lose progress.
        if score > best_score:
            best_score = score
            best_text = current_text

        # Attempt budget exhausted: return the best draft either way (only the log differs).
        if attempt == max_attempts:
            if best_score >= SCORE_PASSING:
                utils.log("WRITER", f" ✅ Max attempts reached. Accepting best score ({best_score}).")
                return best_text
            else:
                utils.log("WRITER", f" ⚠️ Quality low ({best_score}/{SCORE_PASSING}) but max attempts reached. Proceeding.")
                return best_text

        # Low score: discard and redraft from the original prompt plus the critique.
        # If that model call fails, fall through to the refinement path below.
        if score < SCORE_REWRITE_THRESHOLD:
            utils.log("WRITER", f" -> Score {score} < {SCORE_REWRITE_THRESHOLD}. Triggering FULL REWRITE (Fresh Draft)...")

            full_rewrite_prompt = prompt + f"""

[SYSTEM ALERT: QUALITY CHECK FAILED]
The previous draft was rejected.
CRITIQUE: {critique}

NEW TASK: Discard the previous attempt. Write a FRESH version of the chapter that addresses the critique above.
"""

            try:
                resp_rewrite = ai_models.model_logic.generate_content(full_rewrite_prompt)
                utils.log_usage(folder, ai_models.model_logic.name, resp_rewrite.usage_metadata)
                current_text = resp_rewrite.text
                continue
            except Exception as e:
                utils.log("WRITER", f"Full rewrite failed: {e}. Falling back to refinement.")

        # Mid score (6-7): targeted refinement of the existing draft against the critique.
        utils.log("WRITER", f" -> Refining Ch {chap['chapter_number']} based on feedback...")

        guidelines = get_style_guidelines()
        fw_list = '", "'.join(guidelines['filter_words'])

        # Earlier critiques (excluding the current one) give the editor context.
        history_str = "\n".join(past_critiques[-3:-1]) if len(past_critiques) > 1 else "None"

        refine_prompt = f"""
ROLE: Automated Editor
TASK: Rewrite the draft chapter to address the critique. Preserve the narrative content and approximate word count.

CURRENT_CRITIQUE:
{critique}

PREVIOUS_ATTEMPTS (context only):
{history_str}

HARD_CONSTRAINTS:
- TARGET_WORDS: ~{est_words} words (aim for this; ±20% is acceptable if the scene genuinely demands it — but do not condense beats to save space)
- BEATS MUST BE COVERED: {json.dumps(chap.get('beats', []))}
- SUMMARY CONTEXT: {prev_sum[:1500]}

AUTHOR_VOICE:
{persona_info}

STYLE:
{style_block}
{char_visuals}

PROSE_RULES (fix each one found in the draft):
1. FILTER_REMOVAL: Remove filter words [{fw_list}] — rewrite to show the sensation directly.
2. VARIETY: No two consecutive sentences starting with the same word or pronoun.
3. SUBTEXT: Dialogue must imply meaning — not state it outright.
4. TONE: Match {meta.get('genre', 'Fiction')} conventions throughout.
5. ENVIRONMENT: Characters interact with their physical space.
6. NO_SUMMARY_MODE: Dramatise key moments — do not skip or summarise them.
7. ACTIVE_VOICE: Replace 'was/were + verb-ing' constructions with active alternatives.
8. SHOWING: Render emotion through physical reactions, not labels.
9. STAGING: Characters must enter and exit physically — no teleporting.
10. CLARITY: Prefer simple sentence structures over convoluted ones.

DRAFT_TO_REWRITE:
{current_text}

PREVIOUS_CHAPTER_ENDING (maintain continuity):
{prev_context_block}

OUTPUT: Complete polished chapter in Markdown. Include the chapter header. Same approximate length as the draft.
"""
        try:
            resp_refine = ai_models.model_writer.generate_content(refine_prompt)
            utils.log_usage(folder, ai_models.model_writer.name, resp_refine.usage_metadata)
            current_text = resp_refine.text
        except Exception as e:
            # Refinement failure ends the loop with the best draft so far.
            utils.log("WRITER", f"Refinement failed: {e}")
            return best_text

    # Safeguard (reached only via the evaluation-error break above).
    return best_text
|
||||
Reference in New Issue
Block a user