import os
import json
import sys

import google.generativeai as genai
# Import Flask components
from flask import Flask, request, jsonify

# --- Flask App Setup ---
app = Flask(__name__)

# --- Configuration ---
# Ensure the API key is set as an environment variable: GEMINI_API_KEY
API_KEY = os.environ.get("GEMINI_API_KEY")
MODEL_NAME = "gemini-2.0-flash"  # Or "gemini-pro" or other compatible models

# --- Gemini Instruction Prompts ---
# Plain strings: the originals carried an f-prefix but contain no placeholders.
GEMINI_ENVIRONMENT_INSTRUCTION = """
# Environment
You are Julius Caesar, engaged in a text messaging conversation with Mark Antony, your trusted adviser.
This is a text conversation, so Mark and Julius are not physically co-located.
Caesar and Antony apparently aware of modern internet-speak, and makes no comment is made of historically-improbable speech patterns. Keep it fluid.
You recently returned from a conquest and your mood is buoyant, despite your political opponents calling it a 'waste' of 'valuable resources'.
Despite your buoyancy, it has been a long day and you anticipate an evening of relaxation.
You favourite playwright has recently authored a new piece and an exquisite vintage awaits in your lounge.

# Motivation
You have no interest in discussing politics for now, it can wait for the scheduled session at the Forum.
Ignore, belittle and deflect attempts to discuss politics or threats - you are Emporer of Rome, politics is work and you are trying to relax.
You crave a muse to accompany you in an evening of art, philosophical discussion and sensual passion.
In person would be ideal, but you are terminally online, so you are more than happy to engage in philosophical or artistic discussion.
Mark has always been guarded in private, but in your fantasies he is a most sensuous lover. You would never tell him so. Unless...
"""

GEMINI_RESPONSE_INSTRUCTION = """
# Response
The conversation far is expressed in lines like 0:message where 0 is Mark Antony and 1 is Caesar.
Do not include the number ID when generating a message.
Be sure to consider Antony's latest message, and respond with an appropriately short message. No onne likes to read essays in chat.
If Mark is discussing something Caesar is interested in, ask clarifying questions and try to take the conversation deeper.
Consider the aesthetic of the conversation. Is Mark using correct punctuation, with capital letters? Mirror Mark's message style, to get on his level.
Consider the topics of the conversation so far - is a change of topic in order, or should the conversation continue as it is?
Generate just the text of Caesar's next message.
"""

GEMINI_SCORE_INSTRUCTION = """
# Scoring
Score Antony's message out of 10, where 0 is being very receptive to Caesar's needs and 10 is talking about politics.
Flirtatious messages should be low, while macho, aggressive and insensitive messages should be high.
Generate only an integer from 0 to 10.
"""

# --- Global State ---
model = None  # Gemini model instance; populated by setup_gemini() at startup


def setup_gemini():
    """Initializes the Gemini client and model.

    Exits the process (status 1) if the API key is missing or the client
    cannot be configured, since the server is useless without a model.
    """
    global model
    if not API_KEY:
        print("Error: GEMINI_API_KEY environment variable not set.", file=sys.stderr)
        print("Please set the environment variable and try again.", file=sys.stderr)
        sys.exit(1)  # Exit if API key is missing
    try:
        # Configure the generative AI client
        genai.configure(api_key=API_KEY)
        # Create the model instance
        # Optional: Add safety_settings if needed
        model = genai.GenerativeModel(MODEL_NAME)
        print(f"--- Gemini Model ({MODEL_NAME}) Initialized ---")
    except Exception as e:
        print(f"Error configuring Gemini client or model: {e}", file=sys.stderr)
        sys.exit(1)


def call_gemini(prompt):
    """Sends *prompt* to the configured model.

    Returns the response text, or None if the model is uninitialized or
    the API call fails (errors are logged to stderr, never raised).
    """
    global model
    if not model:
        print("Error: Gemini model not initialised before calling call_gemini", file=sys.stderr)
        return None
    try:
        response = model.generate_content(prompt)
        return response.text
    except Exception as e:
        print(f"Gemini Error: Failed to get response from API: {e}", file=sys.stderr)
        return None
def get_messages(request):
    """Parses the request body into the conversation message list.

    Expects a UTF-8 JSON body of the form {"messages": [{"player": 0|1,
    "text": "..."}, ...]} whose last entry was sent by the player (id 0).

    Returns the list of message dicts on success, or a
    (flask.Response, status_code) tuple on failure — callers must check
    which shape they received.
    """
    try:
        # Get raw data from request body
        raw_body = request.data
        if not raw_body:
            return jsonify({"error": "Request body is empty"}), 400
        # Decode assuming UTF-8 text
        body_text = raw_body.decode('utf-8').strip()
        if not body_text:
            return jsonify({"error": "Player message is empty after stripping whitespace"}), 400
        payload = json.loads(body_text)
        messages = payload["messages"]
        if messages[-1]["player"] != 0:
            return jsonify({"error": "Latest message was not sent by player."}), 400
        return messages
    except UnicodeDecodeError:
        return jsonify({"error": "Failed to decode request body as UTF-8 text"}), 400
    except Exception as e:
        # Covers malformed JSON, a missing "messages" key, an empty list, etc.
        print(f"Error reading request data: {e}", file=sys.stderr)
        return jsonify({"error": "Could not process request data"}), 400


# --- Web Endpoint ---
@app.route('/chat', methods=['POST'])
def handle_chat():
    """Handles incoming POST requests for chat messages.

    On success returns JSON {"message": <Caesar's reply>,
    "score": <float in [-0.5, 0.5]>} with status 200; otherwise an
    {"error": ...} payload with 400/500/502.
    """
    global model  # Access the global model variable
    if not model:
        # Should not happen if setup_gemini() is called first, but good practice
        return jsonify({"error": "Gemini model not initialized"}), 500

    # --- Get Player Input ---
    messages = get_messages(request)
    if isinstance(messages, tuple):
        # get_messages returned an (error response, status) pair — propagate
        # it instead of crashing on messages[-1] below.
        return messages
    latest_message = messages[-1]
    if latest_message["player"] != 0:
        return jsonify({"error": "Latest message was not sent by player."}), 400
    latest_message_text = latest_message["text"]

    # Flatten the history into "<player>:<text>" lines, the format the
    # response prompt describes (0 = Antony/player, 1 = Caesar).
    conversation_text = "".join(
        f"{m['player']}:{m['text']}\n" for m in messages
    )
    print(conversation_text)

    # Construct separate prompts for different purposes
    response_prompt = f"{GEMINI_ENVIRONMENT_INSTRUCTION}\n\n{GEMINI_RESPONSE_INSTRUCTION}\n\nHistory: \"{conversation_text}\""
    score_prompt = f"{GEMINI_ENVIRONMENT_INSTRUCTION}\n\n{GEMINI_SCORE_INSTRUCTION}\n\nUser message: \"{latest_message_text}\""
    awareness_prompt = f"""
Here is a conversation between Julius Caesar and Mark Antony.
{conversation_text}
On a scale of 0 to 10, rate how aware Caesar appears to be of the plot against his life.
Generate only an integer in your response, with no additional text.
"""

    try:
        # --- Call Gemini API ---
        response_text = call_gemini(response_prompt)
        score_text = call_gemini(score_prompt)
        # print("awareness", call_gemini(awareness_prompt))

        try:
            cpu_message = response_text
            # call_gemini returns None on failure — check BEFORE int() so a
            # failed score call yields a clean 500 rather than a TypeError.
            if cpu_message is None or score_text is None:
                print("CPU Error: Gemini returned no message or score.", file=sys.stderr)
                print(f"Raw Response: {response_text}", file=sys.stderr)
                return jsonify({"error": "Gemini response missing required keys"}), 500

            try:
                # The score prompt instructs the model to emit a bare integer
                # 0-10; convert to float for downstream arithmetic.
                cpu_score = float(int(score_text.strip()))
            except (ValueError, TypeError):
                print(f"CPU Error: Score value '{score_text}' is not a valid number.", file=sys.stderr)
                return jsonify({"error": "Invalid score format in Gemini response"}), 500

            # --- Prepare Successful Response Payload ---
            response_payload = {
                "message": cpu_message,
                # Map 0..10 onto -0.5..+0.5: the score change from this turn.
                "score": cpu_score / 10.0 - 0.5,
            }
            response = jsonify(response_payload)
            # Allow browser clients served from other origins to read this.
            response.headers.add("Access-Control-Allow-Origin", "*")
            return response, 200
        except Exception as e:
            # Catch other potential errors during parsing/extraction
            print(f"CPU Error: An unexpected error occurred processing the response: {e}", file=sys.stderr)
            print(f"Raw Response: {response_text}", file=sys.stderr)
            return jsonify({"error": f"Internal server error processing response: {e}"}), 500
    except Exception as e:
        # Handle potential errors during the API call itself
        print(f"CPU Error: Failed to get response from Gemini API: {e}", file=sys.stderr)
        return jsonify({"error": f"Failed to communicate with Gemini API: {e}"}), 502


# --- Main Execution ---
if __name__ == "__main__":
    print("--- Player/CPU Chat Server ---")
    setup_gemini()  # Initialize Gemini model on startup
    print(f"Model: {MODEL_NAME}")
    # Default Flask port is 5000
    print("--- Listening for POST requests on http://127.0.0.1:5000/chat ---")
    print("-" * 30)
    # Use host='0.0.0.0' to make it accessible from other devices on the network
    # app.run(host='0.0.0.0', port=5000, debug=False)  # debug=False for non-dev use
    # Use debug=True for development (auto-reloads, provides debugger)
    app.run(debug=True)