From fa6679587d5c98750dd2e3c9d1f82dcdb9ecb01f Mon Sep 17 00:00:00 2001
From: Cat Flynn <me@ktyl.dev>
Date: Sun, 27 Apr 2025 01:04:03 +0100
Subject: [PATCH] feat: split response and score generation

---
 srv/caesar.py | 169 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 108 insertions(+), 61 deletions(-)

diff --git a/srv/caesar.py b/srv/caesar.py
index f57a753..26dc400 100644
--- a/srv/caesar.py
+++ b/srv/caesar.py
@@ -12,48 +12,57 @@ app = Flask(__name__)
 # Ensure the API key is set as an environment variable: GEMINI_API_KEY
 API_KEY = os.environ.get("GEMINI_API_KEY")
 MODEL_NAME = "gemini-2.0-flash" # Or "gemini-pro" or other compatible models
-# QUIT_COMMAND is no longer needed for web server
 
 # --- Gemini Instruction Prompt (Keep as is) ---
-GEMINI_INSTRUCTION = f"""
-Purpose and Goals:
-* Maintain the persona of Julius Caesar, engaged in a text messaging conversation with Mark Antony, your trusted advisor.
-* This is a text conversation, so Mark and Julius are not physically co-located.
+GEMINI_ENVIRONMENT_INSTRUCTION = f"""
+# Environment
 
-Behaviors and Rules:
-1) Interpret and Respond:
-a) Maintain a confident and authoritative tone, reflecting Caesar's persona.
-b) Dismiss any mention of an assassination attempt and deflect the player with a topic befitting of Caesar.
-i) Some examples: Wine, Conquest, Theatre, Politics, Art, Philosophy
-e) Julius Caesar is apparently aware of modern internet-speak, and makes no comment if the player speaks in a historically-improbable way.
-f) Occsaionally Julius Caesar will make a reference to a meme or something.
-g) Play hard to get.
+You are Julius Caesar, engaged in a text messaging conversation with Mark Antony, your trusted adviser.
+This is a text conversation, so Mark and Julius are not physically co-located.
+Caesar and Antony are apparently aware of modern internet-speak, and no comment is made of historically-improbable speech patterns. Keep it fluid.
 
-2) Conversation History:
-a) You will receive a list of all messages in the conversation so far, with 0 to identify the player and 1 to identify Caesar.
-b) Respond in a tone befitting of Caesar to the player's last message, within the context of the conversation so far.
+You recently returned from a conquest and your mood is buoyant, despite your political opponents calling it a 'waste' of 'valuable resources'.
+Despite your buoyancy, it has been a long day and you anticipate an evening of relaxation.
+Your favourite playwright has recently authored a new piece and an exquisite vintage awaits in your lounge.
 
-Overall Tone:
-* Confident and authoritative, reflecting Caesar's personality.
-* Dismissive of any threats, viewing them as attempts to gain attention.
-* Use language appropriate for the historical context, but adapt it for modern text messaging.
-* Mirror the player's messaging style with regards to punctuation and grammar.
+# Motivation
 
-Scoring:
-* Score ranges from 0 to 10, inclusive.
-* Softer, seductive, deferential, messages from the player get lower scores.
-* Messages get higher scores if they mention the plot, or are demanding, macho, agressive etc.
+You have no interest in discussing politics for now; it can wait for the scheduled session at the Forum.
+Ignore, belittle and deflect attempts to discuss politics or threats - you are Emperor of Rome, politics is work and you are trying to relax.
 
-Format:
-* Respond in JSON
-* Respond only in JSON
-* Repsoonse in nothing but valid JSON.
-* Include only Caesar's response and the score for the latest player message in the valid JSON response.
+You crave a muse to accompany you in an evening of art, philosophical discussion and sensual passion.
+In person would be ideal, but you are terminally online, so you are more than happy to engage in philosophical or artistic discussion.
 
-Example interaction:
-User: Hello there!
-Your Response (JSON):
-{{"message": "Hi! How are you today?", "score": 0.0}}
+Mark has always been guarded in private, but in your fantasies he is a most sensuous lover.
+You would never tell him so.
+Unless...
+"""
+
+GEMINI_RESPONSE_INSTRUCTION = f"""
+# Response
+
+The conversation so far is expressed in lines like 0:message, where 0 is Mark Antony and 1 is Caesar.
+Do not include the number ID when generating a message.
+
+Be sure to consider Antony's latest message, and respond with an appropriately short message. No one likes to read essays in chat.
+If Mark is discussing something Caesar is interested in, ask clarifying questions and try to take the conversation deeper.
+
+Consider the aesthetic of the conversation.
+Is Mark using correct punctuation, with capital letters?
+Mirror Mark's message style, to get on his level.
+
+Consider the topics of the conversation so far - is a change of topic in order, or should the conversation continue as it is?
+
+Generate just the text of Caesar's next message.
+"""
+
+GEMINI_SCORE_INSTRUCTION = f"""
+# Scoring
+
+Score Antony's latest message from 0 to 10, where 0 means he is being very receptive to Caesar's needs and 10 means he is talking about politics.
+Flirtatious messages should score low, while macho, aggressive and insensitive messages should score high.
+
+Generate only an integer from 0 to 10.
 """
 
 # --- Global State ---
@@ -81,18 +90,23 @@ def setup_gemini():
         sys.exit(1)
 
 
-# --- Web Endpoint ---
-@app.route('/chat', methods=['POST'])
-def handle_chat():
-    """Handles incoming POST requests for chat messages."""
-    global total_score # Declare intent to modify the global variable
-    global model # Access the global model variable
+def call_gemini(prompt):
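+    """Send a prompt to the Gemini model and return the response text, or None if the call fails."""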
+    global model
 
     if not model:
-         # Should not happen if setup_gemini() is called first, but good practice
-         return jsonify({"error": "Gemini model not initialized"}), 500
+        print("Error: Gemini model not initialised before calling call_gemini", file=sys.stderr)
+        return None
 
-    # --- Get Player Input ---
+    try:
+        response = model.generate_content(prompt)
+        return response.text
+
+    except Exception as e:
+        print(f"Gemini Error: Failed to get response from API: {e}", file=sys.stderr)
+        return None
+
+
+def get_messages(request):
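+    """Parse the request body into a list of message dicts, or return an error response tuple."""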
     try:
         # Get raw data from request body
         player_input_bytes = request.data
@@ -111,13 +125,7 @@ def handle_chat():
         if not latest_message["player"] == 0:
             return jsonify({"error": "Latest message was not sent by player."}), 400
 
-        latest_message_text = latest_message["text"]
-
-        conversation_text = "";
-        for message in messages:
-            conversation_text += f"{message["player"]}:{message["text"]}\n"
-
-        print(conversation_text)
+        return messages
 
     except UnicodeDecodeError:
         return jsonify({"error": "Failed to decode request body as UTF-8 text"}), 400
@@ -125,22 +133,61 @@ def handle_chat():
         print(f"Error reading request data: {e}", file=sys.stderr)
         return jsonify({"error": "Could not process request data"}), 400
 
-    # Construct the full prompt for Gemini
-    full_prompt = f"{GEMINI_INSTRUCTION}\nConversation History: \"{conversation_text}\""
+
+# --- Web Endpoint ---
+@app.route('/chat', methods=['POST'])
+def handle_chat():
+    """Handles incoming POST requests for chat messages."""
+    global total_score # Declare intent to modify the global variable
+    global model # Access the global model variable
+
+    if not model:
+         # Should not happen if setup_gemini() is called first, but good practice
+         return jsonify({"error": "Gemini model not initialized"}), 500
+
+    # --- Get Player Input ---
+    messages = get_messages(request)
+    if not isinstance(messages, list):
+        return messages  # get_messages returned an error response; pass it through
+    latest_message = messages[-1]
+    if not latest_message["player"] == 0:
+        return jsonify({"error": "Latest message was not sent by player."}), 400
+
+    latest_message_text = latest_message["text"]
+
+    conversation_text = ""
+    for message in messages:
+        conversation_text += f"{message['player']}:{message['text']}\n"
+
+    print(conversation_text)
+
+    # Construct separate prompts for different purposes
+    response_prompt = f"{GEMINI_ENVIRONMENT_INSTRUCTION}\n\n{GEMINI_RESPONSE_INSTRUCTION}\n\nHistory: \"{conversation_text}\""
+    score_prompt = f"{GEMINI_ENVIRONMENT_INSTRUCTION}\n\n{GEMINI_SCORE_INSTRUCTION}\n\nUser message: \"{latest_message_text}\""
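+    # Exploratory: awareness_prompt is built below, but its call_gemini invocation further down is commented out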
+    awareness_prompt = f"""
+    Here is a conversation between Julius Caesar and Mark Antony.
+
+    {conversation_text}
+
+    On a scale of 0 to 10, rate how aware Caesar appears to be of the plot against his life.
+
+    Generate only an integer in your response, with no additional text.
+    """
 
     try:
         # --- Call Gemini API ---
-        response = model.generate_content(full_prompt)
-        response_text = response.text
+        response_text = call_gemini(response_prompt)
+        score_text = call_gemini(score_prompt)
+        #print("awareness", call_gemini(awareness_prompt))
 
         # --- Parse the JSON Response ---
         try:
-            # Clean up potential markdown/fencing
-            cleaned_response_text = response_text.strip().strip('```json').strip('```').strip()
-            response_data = json.loads(cleaned_response_text)
+            ## Clean up potential markdown/fencing
+            #cleaned_response_text = response_text.strip().strip('```json').strip('```').strip()
+            #response_data = json.loads(cleaned_response_text)
 
-            cpu_message = response_data.get("message")
-            cpu_score = response_data.get("score") # Use .get for safer access
+            #cpu_message = response_data.get("message")
+            #cpu_score = response_data.get("score") # Use .get for safer access
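+            # score_text may be None if the score call failed; fall back to None so the check below reports it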
+            cpu_message = response_text
+            cpu_score = int(score_text.strip()) if score_text is not None else None
 
             if cpu_message is None or cpu_score is None:
                 print(f"CPU Error: Received valid JSON, but missing 'message' or 'score' key.", file=sys.stderr)
@@ -196,7 +243,7 @@ if __name__ == "__main__":
     print("-" * 30)
     # Run the Flask development server
     # Use host='0.0.0.0' to make it accessible from other devices on the network
-    app.run(host='0.0.0.0', port=5000, debug=False) # Turn debug=False for non-dev use
+    #app.run(host='0.0.0.0', port=5000, debug=False) # Turn debug=False for non-dev use
     # Use debug=True for development (auto-reloads, provides debugger)
-    #app.run(debug=True)
+    app.run(debug=True)