shift/srv/caesar.py

203 lines
8.5 KiB
Python

import os
import google.generativeai as genai
import json
import sys
# Import Flask components
from flask import Flask, request, jsonify
# --- Flask App Setup ---
app = Flask(__name__)
# --- Configuration ---
# Ensure the API key is set as an environment variable: GEMINI_API_KEY
# (a missing key is tolerated here; setup_gemini() exits with an error if unset).
API_KEY = os.environ.get("GEMINI_API_KEY")
MODEL_NAME = "gemini-2.0-flash" # Or "gemini-pro" or other compatible models
# QUIT_COMMAND is no longer needed for web server
# --- Gemini Instruction Prompt ---
# System prompt sent verbatim (followed by the conversation history) on every
# request. Doubled braces {{ }} escape literal braces inside this f-string.
# Fixes: typos ("Occsaionally", "agressive", "Repsoonse in") and the broken
# rule lettering (a, b, e, f, g -> a, b, c, d, e) in the original prompt.
GEMINI_INSTRUCTION = f"""
Purpose and Goals:
* Maintain the persona of Julius Caesar, engaged in a text messaging conversation with Mark Antony, your trusted advisor.
* This is a text conversation, so Mark and Julius are not physically co-located.
Behaviors and Rules:
1) Interpret and Respond:
a) Maintain a confident and authoritative tone, reflecting Caesar's persona.
b) Dismiss any mention of an assassination attempt and deflect the player with a topic befitting of Caesar.
i) Some examples: Wine, Conquest, Theatre, Politics, Art, Philosophy
c) Julius Caesar is apparently aware of modern internet-speak, and makes no comment if the player speaks in a historically-improbable way.
d) Occasionally Julius Caesar will make a reference to a meme or something.
e) Play hard to get.
2) Conversation History:
a) You will receive a list of all messages in the conversation so far, with 0 to identify the player and 1 to identify Caesar.
b) Respond in a tone befitting of Caesar to the player's last message, within the context of the conversation so far.
Overall Tone:
* Confident and authoritative, reflecting Caesar's personality.
* Dismissive of any threats, viewing them as attempts to gain attention.
* Use language appropriate for the historical context, but adapt it for modern text messaging.
* Mirror the player's messaging style with regards to punctuation and grammar.
Scoring:
* Score ranges from 0 to 10, inclusive.
* Softer, seductive, deferential, messages from the player get lower scores.
* Messages get higher scores if they mention the plot, or are demanding, macho, aggressive etc.
Format:
* Respond in JSON
* Respond only in JSON
* Respond in nothing but valid JSON.
* Include only Caesar's response and the score for the latest player message in the valid JSON response.
Example interaction:
User: Hello there!
Your Response (JSON):
{{"message": "Hi! How are you today?", "score": 0.0}}
"""
# --- Global State ---
# Populated by setup_gemini() at startup; handle_chat() refuses requests
# with a 500 while this is still None.
model = None # Initialize model globally
def setup_gemini():
    """Configure the google.generativeai client and build the chat model.

    Reads the module-level API_KEY and MODEL_NAME. On a missing key or a
    client/model construction failure, prints to stderr and exits the
    process with status 1; on success, stores the model in the module-level
    `model` global and announces readiness.
    """
    global model
    # Guard clause: refuse to start without credentials.
    if not API_KEY:
        print("Error: GEMINI_API_KEY environment variable not set.", file=sys.stderr)
        print("Please set the environment variable and try again.", file=sys.stderr)
        sys.exit(1) # Exit if API key is missing
    try:
        # Wire up the client, then instantiate the model.
        # (safety_settings could be passed to GenerativeModel here if needed.)
        genai.configure(api_key=API_KEY)
        model = genai.GenerativeModel(MODEL_NAME)
        print(f"--- Gemini Model ({MODEL_NAME}) Initialized ---")
    except Exception as err:
        print(f"Error configuring Gemini client or model: {err}", file=sys.stderr)
        sys.exit(1)
# --- Web Endpoint ---
@app.route('/chat', methods=['POST'])
def handle_chat():
    """Handle an incoming POST /chat request.

    Expects a UTF-8 JSON body shaped like:
        {"messages": [{"player": 0|1, "text": "..."}, ...]}
    where player 0 is the human and 1 is Caesar, and the final entry must
    be a player (0) message.

    Returns JSON {"message": str, "score": float} with HTTP 200 on success,
    where the score is Gemini's 0..10 rating rescaled to [-0.5, 0.5].
    Error responses carry {"error": str} with a 4xx/5xx status.
    """
    global model  # Set once by setup_gemini() at startup.
    if not model:
        # Should not happen if setup_gemini() ran first, but fail loudly.
        return jsonify({"error": "Gemini model not initialized"}), 500

    # --- Get Player Input ---
    try:
        # Raw data from the request body.
        player_input_bytes = request.data
        if not player_input_bytes:
            print(request)
            return jsonify({"error": "Request body is empty"}), 400
        # Decode assuming UTF-8 text.
        player_input = player_input_bytes.decode('utf-8').strip()
        if not player_input:
            return jsonify({"error": "Player message is empty after stripping whitespace"}), 400
        player_input_json = json.loads(player_input)
        messages = player_input_json["messages"]
        latest_message = messages[-1]
        if latest_message["player"] != 0:
            return jsonify({"error": "Latest message was not sent by player."}), 400
        # Flatten the history into "player:text" lines for the prompt.
        # Single quotes inside the f-string keep this valid on Python < 3.12
        # (the original same-quote nesting is a SyntaxError before 3.12).
        conversation_text = "".join(
            f"{message['player']}:{message['text']}\n" for message in messages
        )
        print(conversation_text)
    except UnicodeDecodeError:
        return jsonify({"error": "Failed to decode request body as UTF-8 text"}), 400
    except Exception as e:
        print(f"Error reading request data: {e}", file=sys.stderr)
        return jsonify({"error": "Could not process request data"}), 400

    # Construct the full prompt for Gemini.
    full_prompt = f"{GEMINI_INSTRUCTION}\nConversation History: \"{conversation_text}\""
    try:
        # --- Call Gemini API ---
        response = model.generate_content(full_prompt)
        response_text = response.text
        # --- Parse the JSON Response ---
        try:
            # Remove Markdown code fencing the model may wrap around its JSON.
            # Fix: the original used str.strip('```json'), which strips a
            # character *set* from both ends rather than a literal prefix;
            # removeprefix/removesuffix is the correct tool.
            cleaned_response_text = response_text.strip()
            cleaned_response_text = cleaned_response_text.removeprefix("```json")
            cleaned_response_text = cleaned_response_text.removeprefix("```")
            cleaned_response_text = cleaned_response_text.removesuffix("```")
            cleaned_response_text = cleaned_response_text.strip()
            response_data = json.loads(cleaned_response_text)
            cpu_message = response_data.get("message")
            cpu_score = response_data.get("score")  # .get for safer access
            if cpu_message is None or cpu_score is None:
                print("CPU Error: Received valid JSON, but missing 'message' or 'score' key.", file=sys.stderr)
                print(f"Raw Response: {cleaned_response_text}", file=sys.stderr)
                return jsonify({"error": "Gemini response missing required keys"}), 500
            # Ensure score is numeric before rescaling.
            try:
                cpu_score = float(cpu_score)
            except (ValueError, TypeError):
                print(f"CPU Error: Score value '{cpu_score}' is not a valid number.", file=sys.stderr)
                return jsonify({"error": "Invalid score format in Gemini response"}), 500
            # --- Prepare Successful Response Payload ---
            response_payload = {
                "message": cpu_message,
                # Rescale Gemini's 0..10 score to [-0.5, 0.5] for the client.
                "score": cpu_score / 10.0 - 0.5
            }
            response = jsonify(response_payload)
            # Allow browser clients on other origins to read the reply.
            response.headers.add("Access-Control-Allow-Origin", "*")
            return response, 200
        except json.JSONDecodeError:
            print("CPU Error: Failed to decode JSON response from Gemini.", file=sys.stderr)
            print(f"Raw Response: {response_text}", file=sys.stderr)
            return jsonify({"error": "Failed to parse Gemini JSON response"}), 500
        except Exception as e:  # Other errors during parsing/extraction.
            print(f"CPU Error: An unexpected error occurred processing the response: {e}", file=sys.stderr)
            print(f"Raw Response: {response_text}", file=sys.stderr)
            return jsonify({"error": f"Internal server error processing response: {e}"}), 500
    except Exception as e:
        # Errors from the API call itself (network, quota, content filtering...).
        print(f"CPU Error: Failed to get response from Gemini API: {e}", file=sys.stderr)
        return jsonify({"error": f"Failed to communicate with Gemini API: {e}"}), 502
# --- Main Execution ---
# --- Main Execution ---
if __name__ == "__main__":
    print("--- Player/CPU Chat Server ---")
    setup_gemini()  # Initialize Gemini model on startup; exits on failure.
    print(f"Model: {MODEL_NAME}")
    # Allow the listen port to be overridden via the PORT environment
    # variable; defaults to Flask's conventional 5000, and the announced
    # URL now always matches the actual port.
    port = int(os.environ.get("PORT", "5000"))
    print(f"--- Listening for POST requests on http://127.0.0.1:{port}/chat ---")
    print("-" * 30)
    # host='0.0.0.0' makes the server reachable from other devices on the
    # network; keep debug=False outside of development (debug=True enables
    # auto-reload and the interactive debugger).
    app.run(host='0.0.0.0', port=port, debug=False)