"""Huey background tasks for book generation, artifact regeneration and
Bible refinement.

These tasks run outside the Flask request context, so all database writes
go through raw sqlite3 connections (see ``_execute_write``) instead of the
Flask-SQLAlchemy session.
"""

import json
import os
import shutil
import sqlite3
import time
from datetime import datetime

from huey import SqliteHuey

import config
from . import ai, export, marketing, story, utils
from .web_db import db, Run, User, Project

# Configure Huey (Task Queue) — SQLite-backed queue stored alongside app data.
huey = SqliteHuey('bookapp_queue', filename=os.path.join(config.DATA_DIR, 'queue.db'))


def _execute_write(db_path, sql, params, timeout=10):
    """Run a single write statement against *db_path* and commit.

    Opens a short-lived connection so we never leak file handles: the
    sqlite3 connection context manager only commits/rolls back, it does
    NOT close, so we close explicitly in ``finally``.

    Raises whatever sqlite3 raises; callers decide how to handle it.
    """
    conn = sqlite3.connect(db_path, timeout=timeout)
    try:
        with conn:  # transaction scope: commit on success, rollback on error
            conn.execute(sql, params)
    finally:
        conn.close()


def db_log_callback(db_path, run_id, phase, msg):
    """Write a log entry directly to SQLite to avoid Flask context issues in threads.

    Retries up to 5 times on ``sqlite3.OperationalError`` (typically a
    locked database); any other error aborts silently — logging must never
    take down the task itself.
    """
    for _ in range(5):
        try:
            _execute_write(
                db_path,
                "INSERT INTO log_entry (run_id, timestamp, phase, message) VALUES (?, ?, ?, ?)",
                # NOTE: utcnow() kept (not tz-aware now()) to preserve the
                # naive-UTC format already stored in the DB.
                (run_id, datetime.utcnow(), phase, str(msg)),
                timeout=5,
            )
            break
        except sqlite3.OperationalError:
            time.sleep(0.1)  # DB locked — back off briefly and retry
        except Exception:
            break  # best-effort: never let logging crash the worker


def db_progress_callback(db_path, run_id, percent):
    """Update run progress in SQLite (same retry/best-effort policy as logging)."""
    for _ in range(5):
        try:
            _execute_write(
                db_path,
                "UPDATE run SET progress = ? WHERE id = ?",
                (percent, run_id),
                timeout=5,
            )
            break
        except sqlite3.OperationalError:
            time.sleep(0.1)
        except Exception:
            break


def _set_run_status(db_path, run_id, status):
    """Best-effort status update for a run; DB failures are swallowed."""
    try:
        _execute_write(db_path, "UPDATE run SET status = ? WHERE id = ?", (status, run_id))
    except Exception:
        pass


def _preserve_cover_art(source_run_dir, current_run_dir, bible_data):
    """Copy cover images from a previous run's Book_* folders into the new run.

    Target folder names are predicted the same way main.py builds them
    (``Book_{n}_{safe_title}``) so the generator finds the covers in place
    and skips regeneration.
    """
    # Map source books -> folder path, keyed by book number parsed from the
    # folder name ("Book_<n>_<title>").
    source_books = {}
    for d in os.listdir(source_run_dir):
        if d.startswith("Book_") and os.path.isdir(os.path.join(source_run_dir, d)):
            parts = d.split('_')
            if len(parts) > 1 and parts[1].isdigit():
                source_books[int(parts[1])] = os.path.join(source_run_dir, d)

    if not (bible_data and 'books' in bible_data):
        return

    for i, book in enumerate(bible_data['books']):
        b_num = book.get('book_number', i + 1)
        if b_num not in source_books:
            continue
        # Found matching book in source; predict the target folder name.
        src_folder = source_books[b_num]
        safe_title = utils.sanitize_filename(book.get('title', f"Book_{b_num}"))
        target_folder = os.path.join(current_run_dir, f"Book_{b_num}_{safe_title}")
        os.makedirs(target_folder, exist_ok=True)
        # Copy cover
        src_cover = os.path.join(src_folder, "cover.png")
        if os.path.exists(src_cover):
            shutil.copy2(src_cover, os.path.join(target_folder, "cover.png"))
        # Also copy cover_art.png to prevent regeneration if logic allows
        src_art = os.path.join(src_folder, "cover_art.png")
        if os.path.exists(src_art):
            shutil.copy2(src_art, os.path.join(target_folder, "cover_art.png"))
        utils.log("SYSTEM", f" -> Copied cover for Book {b_num}")


def _copy_forward_completed_books(runs_dir, run_id):
    """Series optimization: copy finished Book_* folders from the most recent
    prior run into the current run folder so generation can skip them.

    A book counts as finished when its folder contains ``manuscript.json``.
    """
    # Get all run folders except the current one, sorted by numeric run id.
    all_runs = [d for d in os.listdir(runs_dir)
                if d.startswith("run_") and d != f"run_{run_id}"]
    all_runs.sort(key=lambda x: int(x.split('_')[1]) if x.split('_')[1].isdigit() else 0)
    if not all_runs:
        return

    latest_run_dir = os.path.join(runs_dir, all_runs[-1])
    current_run_dir = os.path.join(runs_dir, f"run_{run_id}")
    os.makedirs(current_run_dir, exist_ok=True)
    utils.log("SYSTEM", f"Checking previous run ({all_runs[-1]}) for completed books...")

    for item in os.listdir(latest_run_dir):
        # Copy only folders that look like books and have a manuscript.
        if not (item.startswith("Book_") and os.path.isdir(os.path.join(latest_run_dir, item))):
            continue
        if not os.path.exists(os.path.join(latest_run_dir, item, "manuscript.json")):
            continue
        src = os.path.join(latest_run_dir, item)
        dst = os.path.join(current_run_dir, item)
        try:
            shutil.copytree(src, dst, dirs_exist_ok=True)
            utils.log("SYSTEM", f" -> Copied {item} (Skipping generation).")
        except Exception as e:
            utils.log("SYSTEM", f" -> Failed to copy {item}: {e}")


def _sum_run_cost(run_dir):
    """Total estimated cost across all Book_* subfolders of *run_dir*.

    Each book folder may contain a ``usage_log.json`` with
    ``totals.est_cost_usd``; missing/unreadable files contribute 0.
    """
    total_cost = 0.0
    for item in os.listdir(run_dir):
        item_path = os.path.join(run_dir, item)
        if os.path.isdir(item_path) and item.startswith("Book_"):
            usage_path = os.path.join(item_path, "usage_log.json")
            if os.path.exists(usage_path):
                data = utils.load_json(usage_path)
                # Guard against load_json returning None or a null "totals".
                totals = (data or {}).get('totals') or {}
                total_cost += totals.get('est_cost_usd', 0.0)
    return total_cost


@huey.task()
def generate_book_task(run_id, project_path, bible_path, allow_copy=True,
                       feedback=None, source_run_id=None, keep_cover=False):
    """Background task to run the book generation.

    :param run_id: DB id of the Run row tracking this job.
    :param project_path: root folder of the project (contains bible.json, runs/).
    :param bible_path: path to the project's bible.json.
    :param allow_copy: when True and the project is a series, completed books
        from the latest previous run are copied forward and skipped.
    :param feedback: optional user feedback to fold into the Bible before
        generating (requires *source_run_id*).
    :param source_run_id: run the feedback refers to (used for cover reuse).
    :param keep_cover: preserve cover art from the source run instead of
        regenerating it.
    :returns: dict with run_id, final status, total cost and log file path.
    """
    # 1. Setup Logging
    log_filename = f"system_log_{run_id}.txt"
    # Log to project root initially until the run folder is created by main.
    initial_log = os.path.join(project_path, log_filename)
    utils.set_log_file(initial_log)

    # Hook up database logging / progress reporting.
    db_path = os.path.join(config.DATA_DIR, "bookapp.db")
    utils.set_log_callback(lambda p, m: db_log_callback(db_path, run_id, p, m))
    utils.set_progress_callback(lambda p: db_progress_callback(db_path, run_id, p))

    # Set status to running (best-effort).
    _set_run_status(db_path, run_id, 'running')

    utils.log("SYSTEM", f"Starting Job #{run_id}")

    try:
        # 1.1 Handle Feedback / Modification (Re-run logic)
        if feedback and source_run_id:
            utils.log("SYSTEM", f"Applying feedback to Run #{source_run_id}: '{feedback}'")
            # Load Source Data (Prefer final_blueprint from source run to capture its state)
            source_run_dir = os.path.join(project_path, "runs", f"run_{source_run_id}")
            bible_data = utils.load_json(bible_path)

            # Try to find the blueprint of the book in the source run
            # (Simplification: If multiple books, we apply feedback to the Bible generally)
            if bible_data:
                try:
                    ai.init_models()
                    new_bible = story.refine_bible(bible_data, feedback, project_path)
                    if new_bible:
                        bible_data = new_bible
                        # Save updated Bible (This updates the project state to the new "fork")
                        with open(bible_path, 'w') as f:
                            json.dump(bible_data, f, indent=2)
                        utils.log("SYSTEM", "Bible updated with feedback.")
                except Exception as e:
                    utils.log("ERROR", f"Failed to refine bible: {e}")

            # 1.2 Keep Cover Art Logic
            if keep_cover and os.path.exists(source_run_dir):
                utils.log("SYSTEM", "Attempting to preserve cover art...")
                current_run_dir = os.path.join(project_path, "runs", f"run_{run_id}")
                if not os.path.exists(current_run_dir):
                    os.makedirs(current_run_dir)
                _preserve_cover_art(source_run_dir, current_run_dir, bible_data)

        # 1.5 Copy Forward Logic (Series Optimization)
        is_series = False
        if os.path.exists(bible_path):
            bible_data = utils.load_json(bible_path)
            if bible_data:
                is_series = bible_data.get('project_metadata', {}).get('is_series', False)

        runs_dir = os.path.join(project_path, "runs")
        # Only copy if explicitly requested AND it's a series
        # (standalone books get fresh re-rolls).
        if allow_copy and is_series and os.path.exists(runs_dir):
            _copy_forward_completed_books(runs_dir, run_id)

        # 2. Run Generation — call the existing entry point.
        # Imported lazily to avoid circular imports at worker startup.
        from main import run_generation
        run_generation(bible_path, specific_run_id=run_id)

        utils.log("SYSTEM", "Job Complete.")
        utils.update_progress(100)
        status = "completed"
    except Exception as e:
        utils.log("ERROR", f"Job Failed: {e}")
        status = "failed"

    # 3. Calculate Cost & Cleanup — use the run folder we know main.py used.
    run_dir = os.path.join(project_path, "runs", f"run_{run_id}")
    total_cost = 0.0
    final_log_path = initial_log

    if os.path.exists(run_dir):
        # Move our log file into the run folder.
        final_log_path = os.path.join(run_dir, "web_console.log")
        if os.path.exists(initial_log):
            try:
                os.rename(initial_log, final_log_path)
            except OSError:
                # If rename fails (e.g. across filesystems), copy and delete.
                shutil.copy2(initial_log, final_log_path)
                os.remove(initial_log)

        # Total cost comes from usage_log.json inside each Book folder.
        total_cost = _sum_run_cost(run_dir)

    # 4. Update Database with Final Status
    try:
        _execute_write(
            db_path,
            "UPDATE run SET status = ?, cost = ?, end_time = ?, log_file = ?, progress = 100 WHERE id = ?",
            (status, total_cost, datetime.utcnow(), final_log_path, run_id),
        )
    except Exception as e:
        print(f"Failed to update run status in DB: {e}")

    return {"run_id": run_id, "status": status, "cost": total_cost, "final_log": final_log_path}


@huey.task()
def regenerate_artifacts_task(run_id, project_path, feedback=None):
    """Regenerate cover art and export files for an existing, completed run.

    Re-syncs the book blueprint with the current Bible metadata before
    regenerating, so title/author edits made after the original run are
    picked up.
    """
    # Hook up database logging & status.
    db_path = os.path.join(config.DATA_DIR, "bookapp.db")

    # Determine log file path: prefer the existing web_console.log in the run dir.
    run_dir = os.path.join(project_path, "runs", f"run_{run_id}")
    log_file = os.path.join(run_dir, "web_console.log")
    # Fallback to project root temp file if run dir doesn't exist
    # (unlikely for regeneration).
    if not os.path.exists(run_dir):
        log_file = os.path.join(project_path, f"system_log_{run_id}.txt")

    try:
        with open(log_file, 'a', encoding='utf-8') as f:
            f.write(f"\n\n[{datetime.utcnow().strftime('%H:%M:%S')}] --- REGENERATION STARTED ---\n")
    except Exception:
        pass  # log banner is cosmetic; never fail the task over it

    utils.set_log_file(log_file)
    utils.set_log_callback(lambda p, m: db_log_callback(db_path, run_id, p, m))

    _set_run_status(db_path, run_id, 'running')

    utils.log("SYSTEM", "Starting Artifact Regeneration...")

    # 1. Setup Paths — detect the book subfolder (first one if several).
    book_dir = run_dir
    if os.path.exists(run_dir):
        subdirs = utils.get_sorted_book_folders(run_dir)
        if subdirs:
            book_dir = os.path.join(run_dir, subdirs[0])

    bible_path = os.path.join(project_path, "bible.json")
    if not os.path.exists(run_dir) or not os.path.exists(bible_path):
        utils.log("ERROR", "Run directory or Bible not found.")
        _set_run_status(db_path, run_id, 'failed')
        return

    # 2. Load Data
    bible = utils.load_json(bible_path)
    final_bp_path = os.path.join(book_dir, "final_blueprint.json")
    ms_path = os.path.join(book_dir, "manuscript.json")

    if not os.path.exists(final_bp_path) or not os.path.exists(ms_path):
        utils.log("ERROR", f"Blueprint or Manuscript not found in {book_dir}")
        _set_run_status(db_path, run_id, 'failed')
        return

    bp = utils.load_json(final_bp_path)
    ms = utils.load_json(ms_path)

    # 3. Update Blueprint with new metadata from the Bible.
    meta = bible.get('project_metadata', {})
    if 'book_metadata' in bp:
        # Sync all core metadata
        for k in ['author', 'genre', 'target_audience', 'style']:
            if k in meta:
                bp['book_metadata'][k] = meta[k]

        if bp.get('series_metadata', {}).get('is_series'):
            bp['series_metadata']['series_title'] = meta.get(
                'title', bp['series_metadata'].get('series_title'))
            # Find the specific book's title from the Bible by book number.
            b_num = bp['series_metadata'].get('book_number')
            for b in bible.get('books', []):
                if b.get('book_number') == b_num:
                    bp['book_metadata']['title'] = b.get('title', bp['book_metadata'].get('title'))
                    break
        else:
            bp['book_metadata']['title'] = meta.get('title', bp['book_metadata'].get('title'))

    with open(final_bp_path, 'w') as f:
        json.dump(bp, f, indent=2)

    # 4. Regenerate
    try:
        ai.init_models()
        tracking = None
        events_path = os.path.join(book_dir, "tracking_events.json")
        if os.path.exists(events_path):
            tracking = {
                "events": utils.load_json(events_path),
                "characters": utils.load_json(os.path.join(book_dir, "tracking_characters.json")),
            }
        marketing.generate_cover(bp, book_dir, tracking, feedback=feedback)
        export.compile_files(bp, ms, book_dir)
        utils.log("SYSTEM", "Regeneration Complete.")
        final_status = 'completed'
    except Exception as e:
        utils.log("ERROR", f"Regeneration Failed: {e}")
        final_status = 'failed'

    _set_run_status(db_path, run_id, final_status)


@huey.task()
def rewrite_chapter_task(run_id, project_path, book_folder, chap_num, instruction):
    """Background task to rewrite a single chapter and propagate changes.

    :returns: True on success, False on any failure (missing files, AI
        returning nothing, or an unexpected exception).
    """
    try:
        run_dir = os.path.join(project_path, "runs", f"run_{run_id}")

        # --- Setup Logging for Rewrite ---
        # Append to the existing run log so it appears in the UI.
        log_file = os.path.join(run_dir, "web_console.log")
        if not os.path.exists(log_file):
            log_file = os.path.join(project_path, f"system_log_{run_id}.txt")
        utils.set_log_file(log_file)
        db_path = os.path.join(config.DATA_DIR, "bookapp.db")
        utils.set_log_callback(lambda p, m: db_log_callback(db_path, run_id, p, m))
        # ---------------------------------

        book_path = os.path.join(run_dir, book_folder)
        ms_path = os.path.join(book_path, "manuscript.json")
        bp_path = os.path.join(book_path, "final_blueprint.json")

        if not (os.path.exists(ms_path) and os.path.exists(bp_path)):
            utils.log("ERROR", f"Rewrite failed: files not found for run {run_id}/{book_folder}")
            return False

        ms = utils.load_json(ms_path)
        bp = utils.load_json(bp_path)

        ai.init_models()
        new_text = story.rewrite_chapter_content(bp, ms, chap_num, instruction, book_path)

        if new_text:
            for ch in ms:
                if ch.get('num') == chap_num:
                    ch['content'] = new_text
                    break

            # Save the primary rewrite immediately.
            with open(ms_path, 'w') as f:
                json.dump(ms, f, indent=2)

            # Propagate continuity changes into later chapters, if any.
            updated_ms = story.check_and_propagate(bp, ms, chap_num, book_path)
            if updated_ms:
                ms = updated_ms
                with open(ms_path, 'w') as f:
                    json.dump(ms, f, indent=2)

            export.compile_files(bp, ms, book_path)
            return True
        return False
    except Exception as e:
        utils.log("ERROR", f"Rewrite task exception for run {run_id}/{book_folder}: {e}")
        return False


@huey.task()
def refine_bible_task(project_path, instruction, source_type, selected_keys=None):
    """Background task to refine the Bible.

    Handles partial merging of selected keys into a temp base before
    refinement. The result is written to bible_draft.json; bible.json is
    never modified here. A ``.refining`` lock file marks the task as
    in-progress and is always removed on exit.

    :returns: True if a refined draft was written, False otherwise.
    """
    # Bind the lock path BEFORE the try so the finally block can never hit
    # a NameError if an early statement raises.
    lock_path = os.path.join(project_path, ".refining")
    try:
        bible_path = os.path.join(project_path, "bible.json")
        draft_path = os.path.join(project_path, "bible_draft.json")

        with open(lock_path, 'w') as f:
            f.write("running")

        base_bible = utils.load_json(bible_path)
        if not base_bible:
            return False

        # If refining from draft, load it.
        if source_type == 'draft' and os.path.exists(draft_path):
            draft_bible = utils.load_json(draft_path)
            # If user selected specific changes, merge them into the base.
            # This creates a "Proposed State" to refine further, WITHOUT
            # modifying bible.json.
            if selected_keys is not None and draft_bible:
                base_bible = story.merge_selected_changes(base_bible, draft_bible, selected_keys)
            elif draft_bible:
                # No specific keys but source is draft: refine the whole draft.
                base_bible = draft_bible

        ai.init_models()

        # Run AI Refinement
        new_bible = story.refine_bible(base_bible, instruction, project_path)
        if new_bible:
            # Save to draft file (overwrite previous draft).
            with open(draft_path, 'w') as f:
                json.dump(new_bible, f, indent=2)
            return True
        return False
    except Exception as e:
        utils.log("ERROR", f"Bible refinement task failed: {e}")
        return False
    finally:
        if os.path.exists(lock_path):
            os.remove(lock_path)