feat: Improve revision pipeline quality — 6 targeted enhancements (v3.1)
1. editor.py — Fix rewrite_chapter_content to use model_writer (was model_logic). Chapter rewrites now use the creative writing model, not the cheaper analysis model. 2. editor.py — evaluate_chapter_quality now uses keep_head=True so the evaluator sees the chapter opening (engagement hook, sensory anchoring) as well as the ending; long chapters are no longer scored on the tail alone. 3. editor.py — Consistency analysis sampling upgraded to head+middle+tail (was head+tail), giving the LLM a complete view of each chapter's events. 4. writer.py — max_attempts is now adaptive: climax/resolution chapters (position >= 0.75) receive 3 refinement attempts; others keep 2. 5. writer.py — Polish-skip threshold tightened from 0.012 to 0.008 (1 filter word per 125 words vs. 1 per 83 words), so more borderline drafts are cleaned. 6. style_persona.py — Persona validation sample increased from 200 to 400 words for a more reliable voice-quality assessment. Version bumped: 3.0 → 3.1 Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -67,7 +67,7 @@ def evaluate_chapter_quality(text, chapter_title, genre, model, folder, series_c
|
||||
}}
|
||||
"""
|
||||
try:
|
||||
response = model.generate_content([prompt, utils.truncate_to_tokens(text, 7500)])
|
||||
response = model.generate_content([prompt, utils.truncate_to_tokens(text, 7500, keep_head=True)])
|
||||
model_name = getattr(model, 'name', ai_models.logic_model_name)
|
||||
utils.log_usage(folder, model_name, response.usage_metadata)
|
||||
data = json.loads(utils.clean_json(response.text))
|
||||
@@ -129,7 +129,13 @@ def analyze_consistency(bp, manuscript, folder):
|
||||
chapter_summaries = []
|
||||
for ch in manuscript:
|
||||
text = ch.get('content', '')
|
||||
excerpt = text[:1000] + "\n...\n" + text[-1000:] if len(text) > 2000 else text
|
||||
if len(text) > 3000:
|
||||
mid = len(text) // 2
|
||||
excerpt = text[:800] + "\n...\n" + text[mid - 200:mid + 200] + "\n...\n" + text[-800:]
|
||||
elif len(text) > 1600:
|
||||
excerpt = text[:800] + "\n...\n" + text[-800:]
|
||||
else:
|
||||
excerpt = text
|
||||
chapter_summaries.append(f"Ch {ch.get('num')}: {excerpt}")
|
||||
|
||||
context = "\n".join(chapter_summaries)
|
||||
@@ -236,8 +242,8 @@ def rewrite_chapter_content(bp, manuscript, chapter_num, instruction, folder):
|
||||
"""
|
||||
|
||||
try:
|
||||
response = ai_models.model_logic.generate_content(prompt)
|
||||
utils.log_usage(folder, ai_models.model_logic.name, response.usage_metadata)
|
||||
response = ai_models.model_writer.generate_content(prompt)
|
||||
utils.log_usage(folder, ai_models.model_writer.name, response.usage_metadata)
|
||||
try:
|
||||
data = json.loads(utils.clean_json(response.text))
|
||||
return data.get('content'), data.get('summary')
|
||||
|
||||
@@ -121,7 +121,7 @@ def validate_persona(bp, persona_details, folder):
|
||||
|
||||
sample_prompt = f"""
|
||||
ROLE: Fiction Writer
|
||||
TASK: Write a 200-word opening scene that perfectly demonstrates this author's voice.
|
||||
TASK: Write a 400-word opening scene that perfectly demonstrates this author's voice.
|
||||
|
||||
AUTHOR_PERSONA:
|
||||
Name: {name}
|
||||
@@ -131,7 +131,7 @@ def validate_persona(bp, persona_details, folder):
|
||||
TONE: {tone}
|
||||
|
||||
RULES:
|
||||
- Exactly ~200 words of prose (no chapter header, no commentary)
|
||||
- Exactly ~400 words of prose (no chapter header, no commentary)
|
||||
- Must reflect the persona's stated sentence structure, vocabulary, and voice
|
||||
- Show, don't tell — no filter words (felt, saw, heard, realized, noticed)
|
||||
- Deep POV: immerse the reader in a character's immediate experience
|
||||
|
||||
@@ -380,7 +380,7 @@ def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None,
|
||||
_draft_word_list = current_text.lower().split() if current_text else []
|
||||
_fw_hit_count = sum(1 for w in _draft_word_list if w in _fw_set)
|
||||
_fw_density = _fw_hit_count / max(len(_draft_word_list), 1)
|
||||
_skip_polish = _fw_density < 0.012 # < ~1 filter word per 83 words → draft already clean
|
||||
_skip_polish = _fw_density < 0.008 # < ~1 filter word per 125 words → draft already clean
|
||||
|
||||
if current_text and not _skip_polish:
|
||||
utils.log("WRITER", f" -> Two-pass polish (Pro model, FW density {_fw_density:.3f})...")
|
||||
@@ -427,8 +427,12 @@ def write_chapter(chap, bp, folder, prev_sum, tracking=None, prev_content=None,
|
||||
elif current_text:
|
||||
utils.log("WRITER", f" -> Draft clean (FW density {_fw_density:.3f}). Skipping polish pass.")
|
||||
|
||||
# Reduced from 3 → 2 attempts since polish pass already refines prose before evaluation
|
||||
max_attempts = 2
|
||||
# Adaptive attempts: climax/resolution chapters (position >= 0.75) get 3 passes;
|
||||
# earlier chapters keep 2 (polish pass already refines prose before evaluation).
|
||||
if chapter_position is not None and chapter_position >= 0.75:
|
||||
max_attempts = 3
|
||||
else:
|
||||
max_attempts = 2
|
||||
SCORE_AUTO_ACCEPT = 8
|
||||
# Adaptive passing threshold: lenient for early setup chapters, strict for climax/resolution.
|
||||
# chapter_position=0.0 → setup (SCORE_PASSING=6.5), chapter_position=1.0 → climax (7.5)
|
||||
|
||||
Reference in New Issue
Block a user