"""AI-assisted book-writing pipeline.

Covers: style-guideline management, story-bible merging/enrichment,
outline planning and expansion, chapter drafting with an evaluate/refine
loop, author-persona management, and manuscript continuity analysis.

All AI calls go through ``ai.model_logic`` / ``ai.model_writer`` (or an
explicitly passed ``model``) and expect JSON responses cleaned via
``utils.clean_json``.
"""

import json
import os
import random
import time

import config
from modules import ai
from . import utils


def get_style_guidelines():
    """Return banned-phrase lists for prose refinement.

    Loads user overrides from ``DATA_DIR/style_guidelines.json`` when the
    file exists; otherwise writes the built-in defaults there (best-effort)
    and returns them.

    Returns:
        dict with ``ai_isms`` (cliché phrases) and ``filter_words``
        (perception "filter" verbs) lists.
    """
    defaults = {
        "ai_isms": [
            'testament to', 'tapestry', 'shiver down spine', 'unspoken agreement',
            'palpable tension', 'a sense of', 'suddenly', 'in that moment',
            'symphony of', 'dance of', 'azure', 'cerulean'
        ],
        "filter_words": [
            'felt', 'saw', 'heard', 'realized', 'decided', 'noticed', 'knew', 'thought'
        ]
    }
    path = os.path.join(config.DATA_DIR, "style_guidelines.json")
    if os.path.exists(path):
        # Best-effort load: a corrupt user file silently falls back to defaults.
        try:
            user_data = utils.load_json(path)
            if user_data:
                if 'ai_isms' in user_data:
                    defaults['ai_isms'] = user_data['ai_isms']
                if 'filter_words' in user_data:
                    defaults['filter_words'] = user_data['filter_words']
        except Exception:
            pass
    else:
        # Seed the file so users can edit it; failure to write is non-fatal.
        try:
            with open(path, 'w', encoding='utf-8') as f:
                json.dump(defaults, f, indent=2)
        except Exception:
            pass
    return defaults


def refresh_style_guidelines(model, folder=None):
    """Ask the AI to revise the banned-word lists and persist the result.

    Args:
        model: generative model exposing ``generate_content``.
        folder: optional project folder for usage logging.

    Returns:
        The updated guidelines dict, or the current ones on any failure.
    """
    utils.log("SYSTEM", "Refreshing Style Guidelines via AI...")
    current = get_style_guidelines()
    prompt = f"""
ROLE: Literary Editor
TASK: Update 'Banned Words' lists for AI writing.
INPUT_DATA:
- CURRENT_AI_ISMS: {json.dumps(current.get('ai_isms', []))}
- CURRENT_FILTER_WORDS: {json.dumps(current.get('filter_words', []))}
INSTRUCTIONS:
1. Review lists. Remove false positives.
2. Add new common AI tropes (e.g. 'neon-lit', 'bustling', 'a sense of', 'mined', 'delved').
3. Ensure robustness.
OUTPUT_FORMAT (JSON):
{{ "ai_isms": [strings], "filter_words": [strings] }}
"""
    try:
        response = model.generate_content(prompt)
        model_name = getattr(model, 'name', ai.logic_model_name)
        if folder:
            utils.log_usage(folder, model_name, response.usage_metadata)
        new_data = json.loads(utils.clean_json(response.text))
        # Validate: only persist if both expected keys came back.
        if 'ai_isms' in new_data and 'filter_words' in new_data:
            path = os.path.join(config.DATA_DIR, "style_guidelines.json")
            with open(path, 'w', encoding='utf-8') as f:
                json.dump(new_data, f, indent=2)
            utils.log("SYSTEM", "Style Guidelines updated.")
            return new_data
    except Exception as e:
        utils.log("SYSTEM", f"Failed to refresh guidelines: {e}")
    return current


def merge_selected_changes(original, draft, selected_keys):
    """Helper to merge specific fields from draft to original bible.

    Keys use dotted paths: ``meta.<field>``, ``char.<idx>``,
    ``book.<num>`` or ``book.<num>.beat.<idx>``. ``original`` is mutated
    in place and also returned.
    """
    # Sort keys to ensure deterministic order (numeric parts sort numerically).
    # Fix: sort a copy instead of mutating the caller's list in place.
    def sort_key(k):
        return [int(p) if p.isdigit() else p for p in k.split('.')]

    for key in sorted(selected_keys, key=sort_key):
        parts = key.split('.')
        # Metadata: meta.title
        if parts[0] == 'meta' and len(parts) == 2:
            field = parts[1]
            if field == 'tone':
                original['project_metadata']['style']['tone'] = draft['project_metadata']['style']['tone']
            elif field in original['project_metadata']:
                original['project_metadata'][field] = draft['project_metadata'][field]
        # Characters: char.0
        elif parts[0] == 'char' and len(parts) >= 2:
            idx = int(parts[1])
            if idx < len(draft['characters']):
                if idx < len(original['characters']):
                    original['characters'][idx] = draft['characters'][idx]
                else:
                    original['characters'].append(draft['characters'][idx])
        # Books: book.1.title
        elif parts[0] == 'book' and len(parts) >= 2:
            book_num = int(parts[1])
            orig_book = next((b for b in original['books'] if b['book_number'] == book_num), None)
            draft_book = next((b for b in draft['books'] if b['book_number'] == book_num), None)
            if draft_book:
                if not orig_book:
                    # New book: adopt the whole draft entry and keep ordering.
                    original['books'].append(draft_book)
                    original['books'].sort(key=lambda x: x.get('book_number', 999))
                    continue
                if len(parts) == 2:
                    orig_book['title'] = draft_book['title']
                    orig_book['manual_instruction'] = draft_book['manual_instruction']
                elif len(parts) == 4 and parts[2] == 'beat':
                    beat_idx = int(parts[3])
                    if beat_idx < len(draft_book['plot_beats']):
                        # Pad with empty beats so the index exists.
                        while len(orig_book['plot_beats']) <= beat_idx:
                            orig_book['plot_beats'].append("")
                        orig_book['plot_beats'][beat_idx] = draft_book['plot_beats'][beat_idx]
    return original


def filter_characters(chars):
    """Removes placeholder characters generated by AI."""
    blacklist = ['name', 'character name', 'role', 'protagonist', 'antagonist',
                 'love interest', 'unknown', 'tbd', 'todo', 'hero', 'villain',
                 'main character', 'side character']
    return [c for c in chars if c.get('name') and c.get('name').lower().strip() not in blacklist]


def enrich(bp, folder, context=""):
    """Flesh out a blueprint (``bp``) from its user description via AI.

    Fills only the fields the user left empty (smart merge); existing
    user-provided values are never overwritten. Returns ``bp`` (mutated).
    """
    utils.log("ENRICHER", "Fleshing out details from description...")
    # If book_metadata is missing, create empty dict so AI can fill it
    if 'book_metadata' not in bp:
        bp['book_metadata'] = {}
    if 'characters' not in bp:
        bp['characters'] = []
    if 'plot_beats' not in bp:
        bp['plot_beats'] = []
    prompt = f"""
ROLE: Creative Director
TASK: Create a comprehensive Book Bible from the user description.
INPUT DATA:
- USER_DESCRIPTION: "{bp.get('manual_instruction', 'A generic story')}"
- CONTEXT (Sequel): {context}
STEPS:
1. Generate a catchy Title.
2. Define the Genre and Tone.
3. Determine the Time Period (e.g. "Modern", "1920s", "Sci-Fi Future").
4. Define Formatting Rules for text messages, thoughts, and chapter headers.
5. Create Protagonist and Antagonist/Love Interest.
   - Logic: If sequel, reuse context. If new, create.
6. Outline 5-7 core Plot Beats.
7. Define a 'structure_prompt' describing the narrative arc (e.g. "Hero's Journey", "3-Act Structure", "Detective Procedural").
OUTPUT_FORMAT (JSON):
{{ "book_metadata": {{ "title": "Book Title", "genre": "Genre", "content_warnings": ["Violence", "Major Character Death"], "structure_prompt": "...", "style": {{ "tone": "Tone", "time_period": "Modern", "formatting_rules": ["Chapter Headers: Number + Title", "Text Messages: Italic", "Thoughts: Italic"] }} }}, "characters": [ {{ "name": "John Doe", "role": "Protagonist", "description": "Description", "key_events": ["Planned injury in Act 2"] }} ], "plot_beats": [ "Beat 1", "Beat 2", "..." ] }}
"""
    try:
        # Merge AI response with existing data (don't overwrite if user provided specific keys)
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        response_text = response.text
        cleaned_json = utils.clean_json(response_text)
        ai_data = json.loads(cleaned_json)
        # Smart Merge: Only fill missing fields
        if 'book_metadata' not in bp:
            bp['book_metadata'] = {}
        if 'title' not in bp['book_metadata']:
            bp['book_metadata']['title'] = ai_data.get('book_metadata', {}).get('title')
        if 'structure_prompt' not in bp['book_metadata']:
            bp['book_metadata']['structure_prompt'] = ai_data.get('book_metadata', {}).get('structure_prompt')
        if 'content_warnings' not in bp['book_metadata']:
            bp['book_metadata']['content_warnings'] = ai_data.get('book_metadata', {}).get('content_warnings', [])
        # Merge Style (Flexible)
        if 'style' not in bp['book_metadata']:
            bp['book_metadata']['style'] = {}
        # Handle AI returning legacy keys or new style key
        source_style = ai_data.get('book_metadata', {}).get('style', {})
        for k, v in source_style.items():
            if k not in bp['book_metadata']['style']:
                bp['book_metadata']['style'][k] = v
        if 'characters' not in bp or not bp['characters']:
            bp['characters'] = ai_data.get('characters', [])
        # Filter out default names
        if 'characters' in bp:
            bp['characters'] = filter_characters(bp['characters'])
        if 'plot_beats' not in bp or not bp['plot_beats']:
            bp['plot_beats'] = ai_data.get('plot_beats', [])
        return bp
    except Exception as e:
        utils.log("ENRICHER", f"Enrichment failed: {e}")
        return bp


def plan_structure(bp, folder):
    """Create the initial structural event outline for the book.

    Uses the blueprint's ``structure_prompt`` if present, otherwise picks
    an archetype based on the length label. Returns a list of event dicts
    (``description``/``purpose``), or ``[]`` on failure.
    """
    utils.log("ARCHITECT", "Creating structure...")
    structure_type = bp.get('book_metadata', {}).get('structure_prompt')
    if not structure_type:
        label = bp.get('length_settings', {}).get('label', 'Novel')
        structures = {
            "Chapter Book": "Create a simple episodic structure with clear chapter hooks.",
            "Young Adult": "Create a character-driven arc with high emotional stakes and a clear 'Coming of Age' theme.",
            "Flash Fiction": "Create a single, impactful scene structure with a twist.",
            "Short Story": "Create a concise narrative arc (Inciting Incident -> Rising Action -> Climax -> Resolution).",
            "Novella": "Create a standard 3-Act Structure.",
            "Novel": "Create a detailed 3-Act Structure with A and B plots.",
            "Epic": "Create a complex, multi-arc structure (Hero's Journey) with extensive world-building events."
        }
        structure_type = structures.get(label, "Create a 3-Act Structure.")
    # Simplified: the original initialized an empty list then conditionally
    # replaced it — equivalent to taking the blueprint beats directly.
    beats_context = bp.get('plot_beats', [])
    prompt = f"""
ROLE: Story Architect
TASK: Create a structural event outline.
ARCHETYPE: {structure_type}
TITLE: {bp['book_metadata']['title']}
EXISTING_BEATS: {json.dumps(beats_context)}
OUTPUT_FORMAT (JSON):
{{ "events": [{{ "description": "String", "purpose": "String" }}] }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        return json.loads(utils.clean_json(response.text))['events']
    except Exception:
        return []


def expand(events, pass_num, target_chapters, bp, folder):
    """Run one expansion pass over the outline to approach the chapter target.

    Returns the (possibly grown) event list; on error returns ``events``
    unchanged so the pipeline can continue.
    """
    utils.log("ARCHITECT", f"Expansion pass {pass_num} | Current Beats: {len(events)} | Target Chaps: {target_chapters}")
    # Simplified from the original's dead "empty then maybe replace" pattern.
    beats_context = bp.get('plot_beats', [])
    prompt = f"""
ROLE: Story Architect
TASK: Expand the outline to fit a {target_chapters}-chapter book.
CURRENT_COUNT: {len(events)} beats.
INPUT_OUTLINE: {json.dumps(beats_context)}
CURRENT_EVENTS: {json.dumps(events)}
RULES:
1. Detect pacing gaps.
2. Insert intermediate events.
3. Deepen subplots.
4. PRESERVE original beats.
OUTPUT_FORMAT (JSON):
{{ "events": [{{ "description": "String", "purpose": "String" }}] }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        new_events = json.loads(utils.clean_json(response.text))['events']
        if len(new_events) > len(events):
            utils.log("ARCHITECT", f" -> Added {len(new_events) - len(events)} new beats.")
        elif len(str(new_events)) > len(str(events)) + 20:
            # Same count but noticeably more text: descriptions were enriched.
            utils.log("ARCHITECT", f" -> Fleshed out descriptions (Text grew by {len(str(new_events)) - len(str(events))} chars).")
        else:
            utils.log("ARCHITECT", " -> No significant changes.")
        return new_events
    except Exception as e:
        utils.log("ARCHITECT", f" -> Pass skipped due to error: {e}")
        return events


def create_chapter_plan(events, bp, folder):
    """Group outline events into a per-chapter plan with word budgets.

    Applies a random ±10% variance to the total word target and rescales
    the AI's per-chapter estimates to hit it. Returns a list of chapter
    dicts, or ``[]`` on failure.
    """
    utils.log("ARCHITECT", "Finalizing Chapters...")
    target = bp['length_settings']['chapters']
    words = bp['length_settings'].get('words', 'Flexible')
    include_prologue = bp.get('length_settings', {}).get('include_prologue', False)
    include_epilogue = bp.get('length_settings', {}).get('include_epilogue', False)
    structure_instructions = ""
    if include_prologue:
        structure_instructions += "- Include a 'Prologue' (chapter_number: 0) to set the scene.\n"
    if include_epilogue:
        structure_instructions += "- Include an 'Epilogue' (chapter_number: 'Epilogue') to wrap up.\n"
    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})
    pov_chars = style.get('pov_characters', [])
    pov_instruction = ""
    if pov_chars:
        pov_instruction = f"- Assign a 'pov_character' for each chapter from this list: {json.dumps(pov_chars)}."
    prompt = f"""
ROLE: Pacing Specialist
TASK: Group events into Chapters.
CONSTRAINTS:
- TARGET_CHAPTERS: {target}
- TARGET_WORDS: {words}
- INSTRUCTIONS:
{structure_instructions}
{pov_instruction}
INPUT_EVENTS: {json.dumps(events)}
OUTPUT_FORMAT (JSON):
[{{ "chapter_number": 1, "title": "String", "pov_character": "String", "pacing": "String", "estimated_words": 2000, "beats": ["String"] }}]
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        plan = json.loads(utils.clean_json(response.text))
        # Parse the word target: normalizes "80k", "70,000+", "60 000" and
        # ranges like "70000-90000" (midpoint).
        target_str = str(words).lower().replace(',', '').replace('k', '000').replace('+', '').replace(' ', '')
        target_val = 0
        if '-' in target_str:
            try:
                parts = target_str.split('-')
                target_val = int((int(parts[0]) + int(parts[1])) / 2)
            except Exception:
                pass
        else:
            try:
                target_val = int(target_str)
            except Exception:
                pass
        if target_val > 0:
            variance = random.uniform(0.90, 1.10)
            target_val = int(target_val * variance)
            utils.log("ARCHITECT", f"Target adjusted with variance ({variance:.2f}x): {target_val} words.")
            current_sum = sum(int(c.get('estimated_words', 0)) for c in plan)
            if current_sum > 0:
                factor = target_val / current_sum
                utils.log("ARCHITECT", f"Adjusting chapter lengths by {factor:.2f}x to match target.")
                for c in plan:
                    c['estimated_words'] = int(c.get('estimated_words', 0) * factor)
        return plan
    except Exception as e:
        utils.log("ARCHITECT", f"Failed to create chapter plan: {e}")
        return []


def update_tracking(folder, chapter_num, chapter_text, current_tracking):
    """Update the continuity-tracking object from a freshly written chapter.

    Returns the AI-updated tracking dict, or ``current_tracking`` unchanged
    on any failure.
    """
    utils.log("TRACKER", f"Updating world state & character visuals for Ch {chapter_num}...")
    prompt = f"""
ROLE: Continuity Tracker
TASK: Update the Story Bible based on the new chapter.
INPUT_TRACKING: {json.dumps(current_tracking)}
NEW_TEXT: {chapter_text[:500000]}
OPERATIONS:
1. EVENTS: Append 1-3 key plot points to 'events'.
2. CHARACTERS: Update 'descriptors', 'likes_dislikes', 'speech_style', 'last_worn', 'major_events'.
   - "descriptors": List of strings. Add PERMANENT physical traits (height, hair, eyes), specific items (jewelry, weapons). Avoid duplicates.
   - "likes_dislikes": List of strings. Add specific preferences, likes, or dislikes mentioned (e.g., "Hates coffee", "Loves jazz").
   - "speech_style": String. Describe how they speak (e.g. "Formal, no contractions", "Uses slang", "Stutters", "Short sentences").
   - "last_worn": String. Update if specific clothing is described. IMPORTANT: If a significant time jump occurred (e.g. next day) and no new clothing is described, reset this to "Unknown".
   - "major_events": List of strings. Log significant life-altering events occurring in THIS chapter (e.g. "Lost an arm", "Married", "Betrayed by X").
3. WARNINGS: Append new 'content_warnings'.
OUTPUT_FORMAT (JSON): Return the updated tracking object structure.
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        new_data = json.loads(utils.clean_json(response.text))
        return new_data
    except Exception as e:
        utils.log("TRACKER", f"Failed to update tracking: {e}")
        return current_tracking


def evaluate_chapter_quality(text, chapter_title, genre, model, folder):
    """Score a chapter draft 1-10 against the editorial rubric.

    Returns ``(score, critique_text)``; on error returns
    ``(0, "Evaluation error: ...")`` which the caller treats as a signal
    to keep the current draft.
    """
    guidelines = get_style_guidelines()
    ai_isms = "', '".join(guidelines['ai_isms'])
    fw_examples = ", ".join([f"'He {w}'" for w in guidelines['filter_words'][:5]])
    # Calculate dynamic suggestion count based on length
    word_count = len(text.split()) if text else 0
    min_sugg = max(3, int(word_count / 500))
    max_sugg = min_sugg + 2
    suggestion_range = f"{min_sugg}-{max_sugg}"
    prompt = f"""
ROLE: Senior Literary Editor
TASK: Critique chapter draft.
METADATA:
- TITLE: {chapter_title}
- GENRE: {genre}
PROHIBITED_PATTERNS:
- AI_ISMS: {ai_isms}
- FILTER_WORDS: {fw_examples}
- CLICHES: White Room, As You Know Bob, Summary Mode, Anachronisms.
- SYNTAX: Repetitive structure, Passive Voice, Adverb Reliance.
QUALITY_RUBRIC (1-10):
1. ENGAGEMENT & TENSION: Does the story grip the reader from the first line? Is there conflict or tension in every scene?
2. SCENE EXECUTION: Is the middle of the chapter fully fleshed out? Does it avoid "sagging" or summarizing key moments?
3. VOICE & TONE: Is the narrative voice distinct? Does it match the genre?
4. SENSORY IMMERSION: Does the text engage all five senses (smell, sound, touch, etc.)?
5. SHOW, DON'T TELL: Are emotions shown through physical reactions and subtext?
6. CHARACTER AGENCY: Do characters drive the plot through active choices?
7. PACING: Does the chapter feel rushed? Does the ending land with impact, or does it cut off too abruptly?
8. GENRE APPROPRIATENESS: Are introductions of characters, places, items, or actions consistent with the {genre} conventions?
9. DIALOGUE AUTHENTICITY: Do characters sound distinct? Is there subtext? Avoids "on-the-nose" dialogue.
10. PLOT RELEVANCE: Does the chapter advance the plot or character arcs significantly? Avoids filler.
11. STAGING & FLOW: Do characters enter/exit physically? Do paragraphs transition logically (Action -> Reaction)?
12. PROSE DYNAMICS: Is there sentence variety? Is the rhythm pleasing? Avoids purple prose and excessive adjectives.
13. CLARITY & READABILITY: Is the text easy to follow? Are sentences clear and concise?
SCORING_SCALE:
- 10 (Masterpiece): Flawless, impactful, ready for print.
- 9 (Bestseller): Exceptional quality, minor style tweaks only.
- 7-8 (Professional): Good draft, solid structure, needs editing.
- 6 (Passable): Average, has issues with pacing or voice. Needs heavy refinement.
- 1-5 (Fail): Structural flaws, boring, or incoherent. Needs rewrite.
OUTPUT_FORMAT (JSON):
{{ "score": int, "critique": "Detailed analysis of flaws, citing specific examples from the text.", "actionable_feedback": "List of {suggestion_range} specific, ruthless instructions for the rewrite (e.g. 'Expand the middle dialogue', 'Add sensory details about the rain', 'Dramatize the argument instead of summarizing it')." }}
"""
    try:
        response = model.generate_content([prompt, text[:30000]])
        model_name = getattr(model, 'name', ai.logic_model_name)
        utils.log_usage(folder, model_name, response.usage_metadata)
        data = json.loads(utils.clean_json(response.text))
        critique_text = data.get('critique', 'No critique provided.')
        if data.get('actionable_feedback'):
            critique_text += "\n\nREQUIRED FIXES:\n" + str(data.get('actionable_feedback'))
        return data.get('score', 0), critique_text
    except Exception as e:
        return 0, f"Evaluation error: {str(e)}"


def check_pacing(bp, summary, last_chapter_text, last_chapter_data, remaining_chapters, folder):
    """Check structural health after a chapter and suggest bridge/cut actions.

    Returns the AI's decision dict (``status`` of ``ok``/``add_bridge``/
    ``cut_next``), or ``None`` when there are no remaining chapters or on
    failure.
    """
    utils.log("ARCHITECT", "Checking pacing and structure health...")
    if not remaining_chapters:
        return None
    meta = bp.get('book_metadata', {})
    genre = meta.get('genre', 'Fiction')
    prompt = f"""
ROLE: Structural Editor
TASK: Analyze pacing.
CONTEXT:
- PREVIOUS_SUMMARY: {summary[-3000:]}
- CURRENT_CHAPTER: {last_chapter_text[-2000:]}
- UPCOMING: {json.dumps([c['title'] for c in remaining_chapters[:3]])}
- REMAINING_COUNT: {len(remaining_chapters)}
LOGIC:
- IF skipped major beats -> ADD_BRIDGE
- IF covered next chapter's beats -> CUT_NEXT
- ELSE -> OK
OUTPUT_FORMAT (JSON):
{{ "status": "ok" or "add_bridge" or "cut_next", "reason": "Explanation...", "new_chapter": {{ "title": "...", "beats": ["..."], "pov_character": "..." }} (Required if add_bridge) }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        utils.log("ARCHITECT", f"Pacing check failed: {e}")
        return None


def create_initial_persona(bp, folder):
    """Generate a fictional author persona matched to the book's genre/tone.

    Returns the persona dict, or a neutral fallback on failure.
    """
    utils.log("SYSTEM", "Generating initial Author Persona based on genre/tone...")
    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})
    prompt = f"""
ROLE: Creative Director
TASK: Create a fictional 'Author Persona'.
METADATA:
- TITLE: {meta.get('title')}
- GENRE: {meta.get('genre')}
- TONE: {style.get('tone')}
- AUDIENCE: {meta.get('target_audience')}
OUTPUT_FORMAT (JSON):
{{ "name": "Pen Name", "bio": "Description of writing style (voice, sentence structure, vocabulary)...", "age": "...", "gender": "..." }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        utils.log("SYSTEM", f"Persona generation failed: {e}")
        return {"name": "AI Author", "bio": "Standard, balanced writing style."}


def refine_persona(bp, text, folder):
    """Refine the author persona's bio from a recent text sample.

    Mutates and returns the blueprint's ``author_details`` dict; failures
    leave it unchanged.
    """
    utils.log("SYSTEM", "Refining Author Persona based on recent chapters...")
    ad = bp.get('book_metadata', {}).get('author_details', {})
    current_bio = ad.get('bio', 'Standard style.')
    prompt = f"""
ROLE: Literary Stylist
TASK: Refine Author Bio based on text sample.
INPUT_DATA:
- TEXT_SAMPLE: {text[:3000]}
- CURRENT_BIO: {current_bio}
GOAL: Ensure future chapters sound exactly like the sample. Highlight quirks, patterns, vocabulary.
OUTPUT_FORMAT (JSON):
{{ "bio": "Updated bio..." }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        new_bio = json.loads(utils.clean_json(response.text)).get('bio')
        if new_bio:
            ad['bio'] = new_bio
            utils.log("SYSTEM", " -> Persona bio updated.")
        return ad
    except Exception:
        pass
    return ad


def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None):
    """Draft a chapter, then iterate evaluate/rewrite/refine until accepted.

    Args:
        chap: chapter plan dict (number, title, beats, pacing, etc.).
        bp: the book blueprint.
        folder: project folder for usage logging.
        prev_sum: running story-so-far summary.
        tracking: optional continuity-tracking dict for character visuals.
        prev_content: optional full text of the previous chapter.

    Returns:
        The best chapter text produced, or an error-marker markdown string
        if the initial draft fails entirely.
    """
    pacing = chap.get('pacing', 'Standard')
    est_words = chap.get('estimated_words', 'Flexible')
    utils.log("WRITER", f"Drafting Ch {chap['chapter_number']} ({pacing} | ~{est_words} words): {chap['title']}")
    ls = bp['length_settings']
    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})
    genre = meta.get('genre', 'Fiction')
    pov_char = chap.get('pov_character', '')

    # --- Build the author-voice block (legacy 'author_bio' or structured details) ---
    ad = meta.get('author_details', {})
    if not ad and 'author_bio' in meta:
        persona_info = meta['author_bio']
    else:
        persona_info = f"Name: {ad.get('name', meta.get('author', 'Unknown'))}\n"
        if ad.get('age'):
            persona_info += f"Age: {ad['age']}\n"
        if ad.get('gender'):
            persona_info += f"Gender: {ad['gender']}\n"
        if ad.get('race'):
            persona_info += f"Race: {ad['race']}\n"
        if ad.get('nationality'):
            persona_info += f"Nationality: {ad['nationality']}\n"
        if ad.get('language'):
            persona_info += f"Language: {ad['language']}\n"
        if ad.get('bio'):
            persona_info += f"Style/Bio: {ad['bio']}\n"
    # Attach writing samples (inline sample plus any stored sample files).
    samples = []
    if ad.get('sample_text'):
        samples.append(f"--- SAMPLE PARAGRAPH ---\n{ad['sample_text']}")
    if ad.get('sample_files'):
        for fname in ad['sample_files']:
            fpath = os.path.join(config.PERSONAS_DIR, fname)
            if os.path.exists(fpath):
                try:
                    with open(fpath, 'r', encoding='utf-8', errors='ignore') as f:
                        content = f.read(3000)
                    samples.append(f"--- SAMPLE FROM {fname} ---\n{content}...")
                except Exception:
                    pass
    if samples:
        persona_info += "\nWRITING STYLE SAMPLES:\n" + "\n".join(samples)

    # --- Character continuity block from the tracking object ---
    char_visuals = ""
    if tracking and 'characters' in tracking:
        char_visuals = "\nCHARACTER TRACKING (Visuals & Preferences):\n"
        for name, data in tracking['characters'].items():
            desc = ", ".join(data.get('descriptors', []))
            likes = ", ".join(data.get('likes_dislikes', []))
            speech = data.get('speech_style', 'Unknown')
            worn = data.get('last_worn', 'Unknown')
            char_visuals += f"- {name}: {desc}\n * Speech: {speech}\n * Likes/Dislikes: {likes}\n"
            major = data.get('major_events', [])
            if major:
                char_visuals += f" * Major Events: {'; '.join(major)}\n"
            if worn and worn != 'Unknown':
                char_visuals += f" * Last Worn: {worn} (NOTE: Only relevant if scene is continuous from previous chapter)\n"

    # --- Style block from scalar style entries plus tropes/formatting lists ---
    style_block = "\n".join([f"- {k.replace('_', ' ').title()}: {v}" for k, v in style.items() if isinstance(v, (str, int, float))])
    if 'tropes' in style and isinstance(style['tropes'], list):
        style_block += f"\n- Tropes: {', '.join(style['tropes'])}"
    if 'formatting_rules' in style and isinstance(style['formatting_rules'], list):
        style_block += "\n- Formatting Rules:\n * " + "\n * ".join(style['formatting_rules'])

    prev_context_block = ""
    if prev_content:
        trunc_content = prev_content[-3000:] if len(prev_content) > 3000 else prev_content
        prev_context_block = f"\nPREVIOUS CHAPTER TEXT (For Tone & Continuity):\n{trunc_content}\n"

    prompt = f"""
ROLE: Fiction Writer
TASK: Write Chapter {chap['chapter_number']}: {chap['title']}
METADATA:
- GENRE: {genre}
- FORMAT: {ls.get('label', 'Story')}
- PACING: {pacing}
- TARGET_WORDS: ~{est_words}
- POV: {pov_char if pov_char else 'Protagonist'}
STYLE_GUIDE:
{style_block}
AUTHOR_VOICE:
{persona_info}
INSTRUCTIONS:
- Start with the Chapter Header formatted as Markdown H1 (e.g. '# Chapter X: Title'). Follow the 'Formatting Rules' for the header style.
- SENSORY ANCHORING: Start scenes by establishing Who, Where, and When immediately, anchored with a sensory detail.
- DEEP POV: Immerse the reader in the POV character's immediate experience. Filter descriptions through their specific worldview and emotional state.
- SHOW, DON'T TELL: Focus on immediate action and internal reaction. Don't summarize feelings; show the physical manifestation of them.
- CAUSALITY: Ensure events follow a "Because of X, Y happened" logic, not just "And then X, and then Y".
- STAGING: When characters enter, describe their entrance. Don't let them just "appear" in dialogue.
- SENSORY DETAILS: Use specific, grounding sensory details (smell, touch, sound) rather than generic descriptions.
- ACTIVE VOICE: Use active voice. Subject -> Verb -> Object. Avoid "was/were" constructions.
- STRONG VERBS: Delete adverbs. Use specific verbs (e.g. "trudged" instead of "walked slowly").
- NO INFO-DUMPS: Weave backstory into dialogue or action. Do not stop the story to explain history.
- AVOID CLICHÉS: Avoid common AI tropes (e.g., 'shiver down spine', 'palpable tension', 'unspoken agreement', 'testament to', 'tapestry of', 'azure', 'cerulean').
- MAINTAIN CONTINUITY: Pay close attention to the PREVIOUS CONTEXT. Characters must NOT know things that haven't happened yet or haven't been revealed to them.
- CHARACTER INTERACTIONS: If characters are meeting for the first time in the summary, treat them as strangers.
- SENTENCE VARIETY: Avoid repetitive sentence structures (e.g. starting multiple sentences with "He" or "She"). Vary sentence length to create rhythm.
- GENRE CONSISTENCY: Ensure all introductions of characters, places, items, or actions are strictly appropriate for the {genre} genre. Avoid anachronisms or tonal clashes.
QUALITY_CRITERIA:
1. ENGAGEMENT & TENSION: Grip the reader. Ensure conflict/tension in every scene.
2. SCENE EXECUTION: Flesh out the middle. Avoid summarizing key moments.
3. VOICE & TONE: Distinct narrative voice matching the genre.
4. SENSORY IMMERSION: Engage all five senses.
5. SHOW, DON'T TELL: Show emotions through physical reactions and subtext.
6. CHARACTER AGENCY: Characters must drive the plot through active choices.
7. PACING: Avoid rushing. Ensure the ending lands with impact.
8. GENRE APPROPRIATENESS: Introductions of characters, places, items, or actions must be consistent with {genre} conventions.
9. DIALOGUE AUTHENTICITY: Characters must sound distinct. Use subtext. Avoid "on-the-nose" dialogue.
10. PLOT RELEVANCE: Every scene must advance the plot or character arcs. No filler.
11. STAGING & FLOW: Characters must enter/exit physically. Paragraphs must transition logically.
12. PROSE DYNAMICS: Vary sentence length. Use strong verbs. Avoid passive voice.
13. CLARITY: Ensure sentences are clear and readable. Avoid convoluted phrasing.
- 'Very Fast': Rapid fire, pure action/dialogue, minimal description.
- 'Fast': Punchy, keep it moving.
- 'Standard': Balanced dialogue and description.
- 'Slow': Detailed, atmospheric, immersive.
- 'Very Slow': Deep introspection, heavy sensory detail, slow burn.
CONTEXT:
- STORY_SO_FAR: {prev_sum}
{prev_context_block}
- CHARACTERS: {json.dumps(bp['characters'])}
{char_visuals}
- SCENE_BEATS: {json.dumps(chap['beats'])}
OUTPUT: Markdown text.
"""
    current_text = ""
    try:
        resp_draft = ai.model_writer.generate_content(prompt)
        utils.log_usage(folder, ai.model_writer.name, resp_draft.usage_metadata)
        current_text = resp_draft.text
    except Exception as e:
        utils.log("WRITER", f"⚠️ Failed Ch {chap['chapter_number']}: {e}")
        return f"## Chapter {chap['chapter_number']} Failed\n\nError: {e}"

    # --- Refinement Loop ---
    # Score >= 9 returns immediately; score < 6 triggers a fresh rewrite;
    # otherwise the draft is refined against the critique. The best-scoring
    # draft seen so far is always kept as the fallback.
    max_attempts = 5
    SCORE_AUTO_ACCEPT = 9
    SCORE_PASSING = 7
    SCORE_REWRITE_THRESHOLD = 6
    best_score = 0
    best_text = current_text
    past_critiques = []
    for attempt in range(1, max_attempts + 1):
        utils.log("WRITER", f" -> Evaluating Ch {chap['chapter_number']} (Attempt {attempt}/{max_attempts})...")
        score, critique = evaluate_chapter_quality(current_text, chap['title'], meta.get('genre', 'Fiction'), ai.model_writer, folder)
        past_critiques.append(f"Attempt {attempt}: {critique}")
        if "Evaluation error" in critique:
            # Evaluator itself failed: keep what we have rather than loop.
            utils.log("WRITER", f" ⚠️ {critique}. Keeping current draft.")
            if best_score == 0:
                best_text = current_text
            break
        utils.log("WRITER", f" Score: {score}/10. Critique: {critique}")
        if score >= SCORE_AUTO_ACCEPT:
            utils.log("WRITER", " 🌟 Auto-Accept threshold met.")
            return current_text
        if score > best_score:
            best_score = score
            best_text = current_text
        if attempt == max_attempts:
            if best_score >= SCORE_PASSING:
                utils.log("WRITER", f" ✅ Max attempts reached. Accepting best score ({best_score}).")
                return best_text
            else:
                utils.log("WRITER", f" ⚠️ Quality low ({best_score}/{SCORE_PASSING}) but max attempts reached. Proceeding.")
                return best_text
        if score < SCORE_REWRITE_THRESHOLD:
            utils.log("WRITER", f" -> Score {score} < {SCORE_REWRITE_THRESHOLD}. Triggering FULL REWRITE (Fresh Draft)...")
            full_rewrite_prompt = prompt + f"""
[SYSTEM ALERT: QUALITY CHECK FAILED]
The previous draft was rejected.
CRITIQUE: {critique}
NEW TASK: Discard the previous attempt. Write a FRESH version of the chapter that addresses the critique above.
"""
            try:
                resp_rewrite = ai.model_logic.generate_content(full_rewrite_prompt)
                utils.log_usage(folder, ai.model_logic.name, resp_rewrite.usage_metadata)
                current_text = resp_rewrite.text
                continue
            except Exception as e:
                utils.log("WRITER", f"Full rewrite failed: {e}. Falling back to refinement.")
        utils.log("WRITER", f" -> Refining Ch {chap['chapter_number']} based on feedback...")
        guidelines = get_style_guidelines()
        fw_list = '", "'.join(guidelines['filter_words'])
        # Exclude current critique from history to avoid duplication in prompt
        history_str = "\n".join(past_critiques[:-1]) if len(past_critiques) > 1 else "None"
        refine_prompt = f"""
ROLE: Automated Editor
TASK: Rewrite text to satisfy critique and style rules.
CRITIQUE: {critique}
HISTORY: {history_str}
CONSTRAINTS:
{persona_info}
{style_block}
{char_visuals}
- BEATS: {json.dumps(chap.get('beats', []))}
OPTIMIZATION_RULES:
1. NO_FILTERS: Remove [{fw_list}].
2. VARIETY: No consecutive sentence starts.
3. SUBTEXT: Indirect dialogue.
4. TONE: Match {meta.get('genre', 'Fiction')}.
5. INTERACTION: Use environment.
6. DRAMA: No summary mode.
7. ACTIVE_VERBS: No 'was/were' + ing.
8. SHOWING: Physical emotion.
9. LOGIC: Continuous staging.
10. CLARITY: Simple structures.
INPUT_CONTEXT:
- SUMMARY: {prev_sum}
- PREVIOUS_TEXT: {prev_context_block}
- DRAFT: {current_text}
OUTPUT: Polished Markdown.
"""
        try:
            # Use Writer model (Flash) for refinement to save costs (Flash 1.5 is sufficient for editing)
            resp_refine = ai.model_writer.generate_content(refine_prompt)
            utils.log_usage(folder, ai.model_writer.name, resp_refine.usage_metadata)
            current_text = resp_refine.text
        except Exception as e:
            utils.log("WRITER", f"Refinement failed: {e}")
            return best_text
    return best_text


def harvest_metadata(bp, folder, full_manuscript):
    """Scan the manuscript for significant characters missing from the bible.

    Appends valid new characters to ``bp['characters']`` (best-effort) and
    returns ``bp``.
    """
    utils.log("HARVESTER", "Scanning for new characters...")
    full_text = "\n".join([c.get('content', '') for c in full_manuscript])[:500000]
    prompt = f"""
ROLE: Data Extractor
TASK: Identify NEW significant characters.
INPUT_TEXT: {full_text}
KNOWN_CHARACTERS: {json.dumps(bp['characters'])}
OUTPUT_FORMAT (JSON):
{{ "new_characters": [{{ "name": "String", "role": "String", "description": "String" }}] }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        new_chars = json.loads(utils.clean_json(response.text)).get('new_characters', [])
        if new_chars:
            valid_chars = filter_characters(new_chars)
            if valid_chars:
                utils.log("HARVESTER", f"Found {len(valid_chars)} new chars.")
                bp['characters'].extend(valid_chars)
    except Exception:
        pass
    return bp


def update_persona_sample(bp, folder):
    """Extract a style sample from the manuscript and store it on a persona.

    Writes a sample text file under ``PERSONAS_DIR`` and creates/updates the
    matching entry in ``PERSONAS_FILE``. Returns ``None``; silently returns
    early when there is no usable manuscript.
    """
    utils.log("SYSTEM", "Extracting author persona from manuscript...")
    ms_path = os.path.join(folder, "manuscript.json")
    if not os.path.exists(ms_path):
        return
    ms = utils.load_json(ms_path)
    if not ms:
        return
    # 1. Extract Text Sample
    full_text = "\n".join([c.get('content', '') for c in ms])
    if len(full_text) < 500:
        return
    # 2. Save Sample File
    if not os.path.exists(config.PERSONAS_DIR):
        os.makedirs(config.PERSONAS_DIR)
    meta = bp.get('book_metadata', {})
    safe_title = utils.sanitize_filename(meta.get('title', 'book'))[:20]
    timestamp = int(time.time())
    filename = f"sample_{safe_title}_{timestamp}.txt"
    filepath = os.path.join(config.PERSONAS_DIR, filename)
    sample_text = full_text[:3000]
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(sample_text)
    # 3. Update or Create Persona
    author_name = meta.get('author', 'Unknown Author')
    personas = {}
    if os.path.exists(config.PERSONAS_FILE):
        try:
            with open(config.PERSONAS_FILE, 'r', encoding='utf-8') as f:
                personas = json.load(f)
        except Exception:
            pass
    if author_name not in personas:
        utils.log("SYSTEM", f"Generating new persona profile for '{author_name}'...")
        prompt = f"""
ROLE: Literary Analyst
TASK: Analyze writing style (Tone, Voice, Vocabulary).
TEXT: {sample_text[:1000]}
OUTPUT: 1-sentence author bio.
"""
        try:
            response = ai.model_logic.generate_content(prompt)
            utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
            bio = response.text.strip()
        except Exception:
            bio = "Style analysis unavailable."
        personas[author_name] = {
            "name": author_name,
            "bio": bio,
            "sample_files": [filename],
            "sample_text": sample_text[:500]
        }
    else:
        utils.log("SYSTEM", f"Updating persona '{author_name}' with new sample.")
        if 'sample_files' not in personas[author_name]:
            personas[author_name]['sample_files'] = []
        if filename not in personas[author_name]['sample_files']:
            personas[author_name]['sample_files'].append(filename)
    with open(config.PERSONAS_FILE, 'w', encoding='utf-8') as f:
        json.dump(personas, f, indent=2)


def refine_bible(bible, instruction, folder):
    """Apply a free-form editing instruction to the whole bible JSON via AI.

    Returns the updated bible dict, or ``None`` on failure (caller decides
    whether to keep the original).
    """
    utils.log("SYSTEM", f"Refining Bible with instruction: {instruction}")
    prompt = f"""
ROLE: Senior Developmental Editor
TASK: Update the Bible JSON based on instruction.
INPUT_DATA:
- CURRENT_JSON: {json.dumps(bible)}
- INSTRUCTION: {instruction}
CONSTRAINTS:
- Maintain valid JSON structure.
- Ensure consistency.
OUTPUT_FORMAT (JSON): The full updated Bible JSON object.
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        new_data = json.loads(utils.clean_json(response.text))
        return new_data
    except Exception as e:
        utils.log("SYSTEM", f"Refinement failed: {e}")
        return None


def analyze_consistency(bp, manuscript, folder):
    """Check the manuscript for continuity errors against the bible.

    Returns a dict with ``issues``, ``score``, and ``summary`` keys; on
    failure the dict carries the error message with a zero score.
    """
    utils.log("EDITOR", "Analyzing manuscript for continuity errors...")
    if not manuscript:
        return {"issues": ["No manuscript found."], "score": 0}
    if not bp:
        return {"issues": ["No blueprint found."], "score": 0}
    # Summarize chapters to save tokens (pass full text if small enough, but usually summaries are safer)
    chapter_summaries = []
    for ch in manuscript:
        text = ch.get('content', '')
        # Take first 1000 and last 1000 chars to capture setup and resolution of scenes
        excerpt = text[:1000] + "\n...\n" + text[-1000:] if len(text) > 2000 else text
        chapter_summaries.append(f"Ch {ch.get('num')}: {excerpt}")
    context = "\n".join(chapter_summaries)
    prompt = f"""
ROLE: Continuity Editor
TASK: Analyze book summary for plot holes.
INPUT_DATA:
- CHARACTERS: {json.dumps(bp.get('characters', []))}
- SUMMARIES: {context}
OUTPUT_FORMAT (JSON):
{{ "issues": ["Issue 1", "Issue 2"], "score": 8, "summary": "Brief overall assessment." }}
"""
    try:
        response = ai.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai.model_logic.name, response.usage_metadata)
        return json.loads(utils.clean_json(response.text))
    except Exception as e:
        return {"issues": [f"Analysis failed: {e}"], "score": 0, "summary": "Error during analysis."}


def rewrite_chapter_content(bp, manuscript, chapter_num, instruction, folder):
    """Rewrite a single chapter per a user instruction, preserving voice.

    NOTE(review): the source for this function was truncated mid-body
    ("# Fix:"); the remainder continues beyond this view and is preserved
    as found.
    """
    utils.log("WRITER", f"Rewriting Ch {chapter_num} with instruction: {instruction}")
    # Find target chapter and previous context
    target_chap = next((c for c in manuscript if c['num'] == chapter_num), None)
    if not target_chap:
        return None
    prev_text = ""
    # Determine previous chapter logic
    prev_chap = None
    if isinstance(chapter_num, int):
        prev_chap = next((c for c in manuscript if c['num'] == chapter_num - 1), None)
    elif str(chapter_num).lower() == "epilogue":
        # Find the highest numbered chapter
        numbered_chaps = [c for c in manuscript if isinstance(c['num'], int)]
        if numbered_chaps:
            prev_chap = max(numbered_chaps, key=lambda x: x['num'])
    if prev_chap:
        prev_text = prev_chap.get('content', '')[-3000:]  # Last 3000 chars for context
    meta = bp.get('book_metadata', {})
    style = meta.get('style', {})
    # Construct Persona Info (Maintain Voice)
    ad = meta.get('author_details', {})
    if not ad and 'author_bio' in meta:
        persona_info = meta['author_bio']
    else:
        persona_info = f"Name: {ad.get('name', meta.get('author', 'Unknown'))}\n"
        if ad.get('bio'):
            persona_info += f"Style/Bio: {ad['bio']}\n"
    # Construct Character Visuals (Load tracking for consistency)
    char_visuals = ""
    tracking_path = os.path.join(folder, "tracking_characters.json")
    if os.path.exists(tracking_path):
        try:
            tracking_chars = utils.load_json(tracking_path)
            if tracking_chars:
                char_visuals = "\nCHARACTER TRACKING (Visuals & Preferences):\n"
                for name, data in tracking_chars.items():
                    desc = ", ".join(data.get('descriptors', []))
                    speech = data.get('speech_style', 'Unknown')
                    char_visuals += f"- {name}: {desc}\n * Speech: {speech}\n"
        except Exception:
            pass
    # Fix:
Define fw_list for the prompt guidelines = get_style_guidelines() fw_list = '", "'.join(guidelines['filter_words']) prompt = f""" You are an expert fiction writing AI. Your task is to rewrite a specific chapter based on a user directive. INPUT DATA: - TITLE: {meta.get('title')} - GENRE: {meta.get('genre')} - TONE: {meta.get('style', {}).get('tone')} - AUTHOR_VOICE: {persona_info} - PREVIOUS_CONTEXT: {prev_text} - CURRENT_DRAFT: {target_chap.get('content', '')[:5000]} - CHARACTERS: {json.dumps(bp.get('characters', []))} {char_visuals} PRIMARY DIRECTIVE (USER INSTRUCTION): {instruction} EXECUTION RULES: 1. CONTINUITY: The new text must flow logically from PREVIOUS_CONTEXT. 2. ADHERENCE: The PRIMARY DIRECTIVE overrides any conflicting details in CURRENT_DRAFT. 3. VOICE: Strictly emulate the AUTHOR_VOICE. 4. GENRE: Enforce {meta.get('genre')} conventions. No anachronisms. 5. LOGIC: Enforce strict causality (Action -> Reaction). No teleporting characters. PROSE OPTIMIZATION RULES (STRICT ENFORCEMENT): - FILTER_REMOVAL: Scan for words [{fw_list}]. If found, rewrite the sentence to remove the filter and describe the sensation directly. - SENTENCE_VARIETY: Penalize consecutive sentences starting with the same pronoun or article. Vary structure. - SHOW_DONT_TELL: Convert internal summaries of emotion into physical actions or subtextual dialogue. - ACTIVE_VOICE: Convert passive voice ("was [verb]ed") to active voice. - SENSORY_ANCHORING: The first paragraph must establish the setting using at least one non-visual sense (smell, sound, touch). - SUBTEXT: Dialogue must imply meaning rather than stating it outright. RETURN JSON: {{ "content": "The full chapter text in Markdown...", "summary": "A concise summary of the chapter's events and ending state (for continuity checks)." 
}} """ try: response = ai.model_logic.generate_content(prompt) utils.log_usage(folder, ai.model_logic.name, response.usage_metadata) try: data = json.loads(utils.clean_json(response.text)) return data.get('content'), data.get('summary') except: # Fallback if model returns raw text instead of JSON return response.text, None except Exception as e: utils.log("WRITER", f"Rewrite failed: {e}") return None, None def check_and_propagate(bp, manuscript, changed_chap_num, folder, change_summary=None): utils.log("WRITER", f"Checking ripple effects from Ch {changed_chap_num}...") # Find the changed chapter changed_chap = next((c for c in manuscript if c['num'] == changed_chap_num), None) if not changed_chap: return None if change_summary: current_context = change_summary else: # Summarize the change to save tokens (Fallback if no summary provided) change_summary_prompt = f""" ROLE: Summarizer TASK: Summarize the key events and ending state of this chapter for continuity tracking. TEXT: {changed_chap.get('content', '')[:10000]} FOCUS: - Major plot points. - Character status changes (injuries, items acquired, location changes). - New information revealed. OUTPUT: Concise text summary. 
""" try: resp = ai.model_writer.generate_content(change_summary_prompt) utils.log_usage(folder, ai.model_writer.name, resp.usage_metadata) current_context = resp.text except: current_context = changed_chap.get('content', '')[-2000:] # Fallback original_change_context = current_context # Iterate subsequent chapters sorted_ms = sorted(manuscript, key=utils.chapter_sort_key) start_index = -1 for i, c in enumerate(sorted_ms): if str(c['num']) == str(changed_chap_num): start_index = i break if start_index == -1 or start_index == len(sorted_ms) - 1: return None changes_made = False consecutive_no_changes = 0 potential_impact_chapters = [] for i in range(start_index + 1, len(sorted_ms)): target_chap = sorted_ms[i] # Optimization: If 2 chapters in a row didn't need changes, assume the ripple has stopped locally. # Perform Long-Range Scan to see if we need to jump ahead. if consecutive_no_changes >= 2: if target_chap['num'] not in potential_impact_chapters: # Check if we have pending future flags future_flags = [n for n in potential_impact_chapters if isinstance(n, int) and isinstance(target_chap['num'], int) and n > target_chap['num']] if not future_flags: # No pending flags. Scan remaining chapters. remaining_chaps = sorted_ms[i:] if not remaining_chaps: break utils.log("WRITER", " -> Short-term ripple dissipated. Scanning remaining chapters for long-range impacts...") chapter_summaries = [] for rc in remaining_chaps: text = rc.get('content', '') excerpt = text[:500] + "\n...\n" + text[-500:] if len(text) > 1000 else text chapter_summaries.append(f"Ch {rc['num']}: {excerpt}") scan_prompt = f""" ROLE: Continuity Scanner TASK: Identify chapters impacted by a change. CHANGE_CONTEXT: {original_change_context} CHAPTER_SUMMARIES: {json.dumps(chapter_summaries)} CRITERIA: Identify later chapters that mention items, characters, or locations involved in the Change Context. OUTPUT_FORMAT (JSON): [Chapter_Number_Int, ...] 
""" try: resp = ai.model_logic.generate_content(scan_prompt) utils.log_usage(folder, ai.model_logic.name, resp.usage_metadata) potential_impact_chapters = json.loads(utils.clean_json(resp.text)) if not isinstance(potential_impact_chapters, list): potential_impact_chapters = [] # Ensure integers potential_impact_chapters = [int(x) for x in potential_impact_chapters if str(x).isdigit()] except Exception as e: utils.log("WRITER", f" -> Scan failed: {e}. Stopping.") break if not potential_impact_chapters: utils.log("WRITER", " -> No long-range impacts detected. Stopping.") break else: utils.log("WRITER", f" -> Detected potential impact in chapters: {potential_impact_chapters}") # If current chapter is still not in the list, skip it # Safety: Always check non-integer chapters (Prologue/Epilogue) to be safe if isinstance(target_chap['num'], int) and target_chap['num'] not in potential_impact_chapters: utils.log("WRITER", f" -> Skipping Ch {target_chap['num']} (Not flagged).") continue utils.log("WRITER", f" -> Checking Ch {target_chap['num']} for continuity...") prompt = f""" ROLE: Continuity Checker TASK: Determine if chapter needs rewrite based on new context. INPUT_DATA: - CHANGED_CHAPTER: {changed_chap_num} - NEW_CONTEXT: {current_context} - CURRENT_CHAPTER_TEXT: {target_chap['content'][:5000]}... DECISION_LOGIC: - Compare CURRENT_CHAPTER_TEXT with NEW_CONTEXT. - If the chapter contradicts the new context (e.g. references events that didn't happen, or characters who are now dead/absent), it needs a REWRITE. - If it fits fine, NO_CHANGE. 
OUTPUT_FORMAT (JSON): {{ "status": "NO_CHANGE" or "REWRITE", "reason": "Brief explanation", "content": "Full Markdown text of the rewritten chapter (ONLY if status is REWRITE, otherwise null)" }} """ try: response = ai.model_writer.generate_content(prompt) utils.log_usage(folder, ai.model_writer.name, response.usage_metadata) data = json.loads(utils.clean_json(response.text)) if data.get('status') == 'NO_CHANGE': utils.log("WRITER", f" -> Ch {target_chap['num']} is consistent.") # Update context for next iteration using existing text current_context = f"Ch {target_chap['num']} Summary: " + target_chap.get('content', '')[-2000:] consecutive_no_changes += 1 elif data.get('status') == 'REWRITE' and data.get('content'): new_text = data.get('content') if new_text: utils.log("WRITER", f" -> Rewriting Ch {target_chap['num']} to fix continuity.") target_chap['content'] = new_text changes_made = True # Update context with NEW text current_context = f"Ch {target_chap['num']} Summary: " + new_text[-2000:] consecutive_no_changes = 0 # Save immediately to prevent data loss if subsequent checks fail try: with open(os.path.join(folder, "manuscript.json"), 'w') as f: json.dump(manuscript, f, indent=2) except: pass except Exception as e: utils.log("WRITER", f" -> Check failed: {e}") return manuscript if changes_made else None