v2.0.0: Modularize project into single-responsibility packages
Replaced monolithic modules/ package with a clean architecture:
- core/ config.py, utils.py
- ai/ models.py (ResilientModel), setup.py (init_models)
- story/ planner.py, writer.py, editor.py, style_persona.py, bible_tracker.py
- marketing/ cover.py, blurb.py, fonts.py, assets.py
- export/ exporter.py
- web/ app.py (Flask factory), db.py, helpers.py, tasks.py, routes/{auth,project,run,persona,admin}.py
- cli/ engine.py (run_generation), wizard.py (BookWizard)
Flask routes split into 5 Blueprints; all templates updated with blueprint-
prefixed url_for() calls. Dockerfile and docker-compose updated to use
web.app entry point and new package paths.
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
180
story/style_persona.py
Normal file
180
story/style_persona.py
Normal file
@@ -0,0 +1,180 @@
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from core import config, utils
|
||||
from ai import models as ai_models
|
||||
|
||||
|
||||
def get_style_guidelines():
    """Return the banned-word style guidelines, merged with user overrides.

    Loads ``style_guidelines.json`` from ``config.DATA_DIR`` when present and
    lets it override the built-in defaults key-by-key. When the file does not
    exist, the defaults are written to disk as an editable template (best
    effort). Always returns a dict with ``ai_isms`` and ``filter_words`` lists.
    """
    defaults = {
        "ai_isms": [
            'testament to', 'tapestry', 'shiver down spine', 'unspoken agreement',
            'palpable tension', 'a sense of', 'suddenly', 'in that moment',
            'symphony of', 'dance of', 'azure', 'cerulean'
        ],
        "filter_words": [
            'felt', 'saw', 'heard', 'realized', 'decided', 'noticed', 'knew', 'thought'
        ]
    }
    path = os.path.join(config.DATA_DIR, "style_guidelines.json")
    if os.path.exists(path):
        # Merge user-provided lists over the defaults; a corrupt or unreadable
        # file is ignored rather than crashing the generation pipeline.
        # Fix: was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.
        try:
            user_data = utils.load_json(path)
            if user_data:
                if 'ai_isms' in user_data:
                    defaults['ai_isms'] = user_data['ai_isms']
                if 'filter_words' in user_data:
                    defaults['filter_words'] = user_data['filter_words']
        except Exception:
            pass
    else:
        # Seed the data dir with the defaults so users can edit them later.
        try:
            with open(path, 'w') as f:
                json.dump(defaults, f, indent=2)
        except OSError:
            pass
    return defaults
|
||||
|
||||
|
||||
def refresh_style_guidelines(model, folder=None):
    """Ask the AI model to curate the banned-word lists and persist the result.

    Args:
        model: Generative model exposing ``generate_content``.
        folder: Optional project folder; when given, token usage is logged.

    Returns:
        The updated guidelines dict on success, otherwise the current ones.
    """
    utils.log("SYSTEM", "Refreshing Style Guidelines via AI...")
    current = get_style_guidelines()

    prompt = f"""
    ROLE: Literary Editor
    TASK: Update 'Banned Words' lists for AI writing.

    INPUT_DATA:
    - CURRENT_AI_ISMS: {json.dumps(current.get('ai_isms', []))}
    - CURRENT_FILTER_WORDS: {json.dumps(current.get('filter_words', []))}

    INSTRUCTIONS:
    1. Review lists. Remove false positives.
    2. Add new common AI tropes (e.g. 'neon-lit', 'bustling', 'a sense of', 'mined', 'delved').
    3. Ensure robustness.

    OUTPUT_FORMAT (JSON): {{ "ai_isms": [strings], "filter_words": [strings] }}
    """
    try:
        response = model.generate_content(prompt)
        # The model object may not expose .name (e.g. a wrapper); fall back to
        # the configured logic-model name for usage accounting.
        model_name = getattr(model, 'name', ai_models.logic_model_name)
        if folder:
            utils.log_usage(folder, model_name, response.usage_metadata)
        new_data = json.loads(utils.clean_json(response.text))

        if 'ai_isms' in new_data and 'filter_words' in new_data:
            path = os.path.join(config.DATA_DIR, "style_guidelines.json")
            with open(path, 'w') as f:
                json.dump(new_data, f, indent=2)
            utils.log("SYSTEM", "Style Guidelines updated.")
            return new_data
        # Fix: a structurally incomplete response previously fell through
        # silently; surface it so operators know the refresh was a no-op.
        utils.log("SYSTEM", "Guideline refresh returned incomplete JSON; keeping current lists.")
    except Exception as e:
        utils.log("SYSTEM", f"Failed to refresh guidelines: {e}")
    return current
|
||||
|
||||
|
||||
def create_initial_persona(bp, folder):
    """Generate a fictional author persona from the blueprint's metadata.

    Builds a prompt from title/genre/tone/audience and asks the logic model
    for a persona JSON. Falls back to a neutral default persona when the
    request or JSON parsing fails (the failure is logged).
    """
    utils.log("SYSTEM", "Generating initial Author Persona based on genre/tone...")
    metadata = bp.get('book_metadata', {})
    style_cfg = metadata.get('style', {})

    persona_prompt = f"""
    ROLE: Creative Director
    TASK: Create a fictional 'Author Persona'.

    METADATA:
    - TITLE: {metadata.get('title')}
    - GENRE: {metadata.get('genre')}
    - TONE: {style_cfg.get('tone')}
    - AUDIENCE: {metadata.get('target_audience')}

    OUTPUT_FORMAT (JSON): {{ "name": "Pen Name", "bio": "Description of writing style (voice, sentence structure, vocabulary)...", "age": "...", "gender": "..." }}
    """
    try:
        reply = ai_models.model_logic.generate_content(persona_prompt)
        utils.log_usage(folder, ai_models.model_logic.name, reply.usage_metadata)
        cleaned = utils.clean_json(reply.text)
        return json.loads(cleaned)
    except Exception as e:
        utils.log("SYSTEM", f"Persona generation failed: {e}")
        return {"name": "AI Author", "bio": "Standard, balanced writing style."}
|
||||
|
||||
|
||||
def refine_persona(bp, text, folder):
    """Refine the author persona bio so future prose matches a text sample.

    Args:
        bp: Blueprint dict; ``book_metadata.author_details`` is read and its
            ``bio`` field is updated in place on success.
        text: Recent chapter text; only the first 3000 characters are used.
        folder: Project folder used for token-usage logging.

    Returns:
        The (possibly updated) author-details dict. Best effort: on any model
        or parsing failure the existing details are returned unchanged.
    """
    utils.log("SYSTEM", "Refining Author Persona based on recent chapters...")
    ad = bp.get('book_metadata', {}).get('author_details', {})
    current_bio = ad.get('bio', 'Standard style.')

    prompt = f"""
    ROLE: Literary Stylist
    TASK: Refine Author Bio based on text sample.

    INPUT_DATA:
    - TEXT_SAMPLE: {text[:3000]}
    - CURRENT_BIO: {current_bio}

    GOAL: Ensure future chapters sound exactly like the sample. Highlight quirks, patterns, vocabulary.

    OUTPUT_FORMAT (JSON): {{ "bio": "Updated bio..." }}
    """
    try:
        response = ai_models.model_logic.generate_content(prompt)
        utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
        new_bio = json.loads(utils.clean_json(response.text)).get('bio')
        if new_bio:
            ad['bio'] = new_bio
            utils.log("SYSTEM", " -> Persona bio updated.")
    except Exception as e:
        # Fix: was a bare `except: pass` that also swallowed SystemExit and
        # hid every failure; keep best-effort semantics but log the cause.
        utils.log("SYSTEM", f"Persona refinement failed: {e}")
    return ad
|
||||
|
||||
|
||||
def update_persona_sample(bp, folder):
    """Extract a writing sample from the manuscript and update the persona DB.

    Saves the first 3000 characters of the manuscript as a sample file under
    ``config.PERSONAS_DIR`` and creates or updates the author's entry in
    ``config.PERSONAS_FILE``. New authors get an AI-generated one-line bio
    (best effort). Returns early (None) when the manuscript is missing,
    empty, or shorter than 500 characters.
    """
    utils.log("SYSTEM", "Extracting author persona from manuscript...")

    ms_path = os.path.join(folder, "manuscript.json")
    if not os.path.exists(ms_path):
        return
    ms = utils.load_json(ms_path)
    if not ms:
        return

    full_text = "\n".join(c.get('content', '') for c in ms)
    if len(full_text) < 500:
        # Not enough prose to characterize a writing style.
        return

    # Fix: exist_ok avoids the race between the existence check and makedirs.
    os.makedirs(config.PERSONAS_DIR, exist_ok=True)

    meta = bp.get('book_metadata', {})
    safe_title = utils.sanitize_filename(meta.get('title', 'book'))[:20]
    timestamp = int(time.time())
    filename = f"sample_{safe_title}_{timestamp}.txt"
    filepath = os.path.join(config.PERSONAS_DIR, filename)

    sample_text = full_text[:3000]
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(sample_text)

    author_name = meta.get('author', 'Unknown Author')

    personas = {}
    if os.path.exists(config.PERSONAS_FILE):
        # Fix: narrowed from a bare `except:`; a corrupt registry starts fresh
        # rather than crashing, but interrupts are no longer swallowed.
        try:
            with open(config.PERSONAS_FILE, 'r') as f:
                personas = json.load(f)
        except (OSError, json.JSONDecodeError):
            pass

    if author_name not in personas:
        utils.log("SYSTEM", f"Generating new persona profile for '{author_name}'...")
        prompt = f"""
        ROLE: Literary Analyst
        TASK: Analyze writing style (Tone, Voice, Vocabulary).
        TEXT: {sample_text[:1000]}
        OUTPUT: 1-sentence author bio.
        """
        try:
            response = ai_models.model_logic.generate_content(prompt)
            utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
            bio = response.text.strip()
        except Exception:
            # Best effort: the sample file is still recorded without a bio.
            bio = "Style analysis unavailable."

        personas[author_name] = {
            "name": author_name,
            "bio": bio,
            "sample_files": [filename],
            "sample_text": sample_text[:500]
        }
    else:
        utils.log("SYSTEM", f"Updating persona '{author_name}' with new sample.")
        persona = personas[author_name]
        persona.setdefault('sample_files', [])
        if filename not in persona['sample_files']:
            persona['sample_files'].append(filename)

    with open(config.PERSONAS_FILE, 'w') as f:
        json.dump(personas, f, indent=2)
|
||||
Reference in New Issue
Block a user