import uuid
from fastapi import FastAPI, HTTPException
import openai
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from dotenv import load_dotenv
import os
from redis import Redis
import re
from difflib import SequenceMatcher
import string
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
redis_client = Redis(host='localhost', port=6379, db=0, decode_responses=True)
class ExerciseInput(BaseModel):
    user_id: str
    language: str
    level: str
class EvaluationInput(BaseModel):
    user_id: str
    session_id: str
    user_response: str
def set_session(user_id, session_id, data):
    key = f"{user_id}:{session_id}"
    redis_client.set(key, data)
def get_session(user_id, session_id):
    key = f"{user_id}:{session_id}"
    return redis_client.get(key)
def delete_session(user_id, session_id):
    key = f"{user_id}:{session_id}"
    redis_client.delete(key)
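# A minimal optional variant (a sketch, not wired into the endpoints below): redis-py's
# set() accepts an expiry in seconds via `ex`, so abandoned sessions could clean
# themselves up. The 3600-second TTL is an arbitrary example value.
def set_session_with_ttl(user_id, session_id, data, ttl_seconds=3600):
    redis_client.set(f"{user_id}:{session_id}", data, ex=ttl_seconds)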
def parse_openai_response(response_content):
    # Try the labelled "<label>: <question>\n\nEnglish: <translation>" shape first,
    # then the unlabelled two-part variant.
    patterns = [
        r'^(.*?):\s*(.+?)\s*\n\nEnglish:\s*(.+)$',
        r'^(.+?)\s*\n\nEnglish:\s*(.+)$',
    ]
    for pattern in patterns:
        match = re.search(pattern, response_content, re.DOTALL | re.IGNORECASE)
        if match:
            question = match.group(match.lastindex - 1).strip()
            translation = match.group(match.lastindex).strip()
            return question, translation
    # Fallback: scan line by line for an 'English:' marker and take the line before it.
    parts = response_content.strip().split('\n')
    english_index = next((i for i, part in enumerate(parts) if 'English:' in part), None)
    if english_index is not None and english_index > 0:
        question = parts[english_index - 1].split(': ', 1)[-1].strip()
        translation = parts[english_index].split(': ', 1)[-1].strip()
        return question, translation
    return None, None
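# Illustration (the reply text here is invented, not real API output): the first pattern
# is aimed at replies shaped like "French: Comment ça va ?\n\nEnglish: How are you?",
# e.g. parse_openai_response("French: Comment ça va ?\n\nEnglish: How are you?")
# would return ("Comment ça va ?", "How are you?"); the line-based fallback covers
# replies where 'English:' appears on its own line instead.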
@app.post("/generate_exercise/")
async def generate_exercise(input: ExerciseInput):
    context_prompts = {
        "Tamil": {
            "beginner": "Create a simple Tamil question and its English translation for a language learner. Try to generate a different question every time and keep the format consistent, with the English translation prefixed by 'English:'.",
            "intermediate": "Create a moderate Tamil sentence and its English translation for a language learner. Try to generate a different sentence every time and keep the format consistent, with the English translation prefixed by 'English:'.",
            "advanced": "Create a complex Tamil sentence and its English translation for a language learner. Try to generate a different sentence every time and keep the format consistent, with the English translation prefixed by 'English:'."
        },
        "French": {
            "beginner": "Create a simple French question and its English translation for a language learner. Try to generate a different question every time and keep the format consistent, with the English translation prefixed by 'English:'.",
            "intermediate": "Create a moderate French question and its English translation for a language learner. Try to generate a different question every time and keep the format consistent, with the English translation prefixed by 'English:'.",
            "advanced": "Create a complex French question and its English translation for a language learner. Try to generate a different question every time and keep the format consistent, with the English translation prefixed by 'English:'."
        },
        "Spanish": {
            "beginner": "Create a simple Spanish question and its English translation for a language learner. Try to generate a different question every time and keep the format consistent, with the English translation prefixed by 'English:'.",
            "intermediate": "Create a moderate Spanish question and its English translation for a language learner. Try to generate a different question every time and keep the format consistent, with the English translation prefixed by 'English:'.",
            "advanced": "Create a complex Spanish question and its English translation for a language learner. Try to generate a different question every time and keep the format consistent, with the English translation prefixed by 'English:'."
        }
    }
    if input.language not in context_prompts or input.level not in context_prompts[input.language]:
        raise HTTPException(status_code=400, detail="Invalid language or level")
    prompt = context_prompts[input.language][input.level]
    try:
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful language learning assistant."},
                {"role": "user", "content": prompt}
            ]
        )
        content = response.choices[0].message.content
        question, correct_answer = parse_openai_response(content)
        if question is None or correct_answer is None:
            raise ValueError("Failed to parse the OpenAI response")
        # Store the expected answer under a fresh session id so /evaluate/ can look it up later.
        session_id = str(uuid.uuid4())
        set_session(input.user_id, session_id, correct_answer)
        return {"user_id": input.user_id, "session_id": session_id, "question": question}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
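# Example exchange for the endpoint above (a sketch; the ids are placeholders and the
# actual question depends on the model's reply):
#   POST /generate_exercise/   {"user_id": "u1", "language": "French", "level": "beginner"}
#   -> {"user_id": "u1", "session_id": "<uuid4>", "question": "..."}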
def normalize_answer(answer):
    answer = answer.lower().translate(str.maketrans('', '', string.punctuation))
    answer = " ".join(answer.split())
    return answer
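# For example, normalize_answer("How are you?") and normalize_answer("how are  you")
# both reduce to "how are you": lowercased, punctuation stripped, whitespace collapsed.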
@app.post("/evaluate/")
async def evaluate(input: EvaluationInput):
    # Look the stored answer up once; a missing key means the session does not exist
    # or has already been used.
    correct_answer = get_session(input.user_id, input.session_id)
    if correct_answer is None:
        raise HTTPException(status_code=404, detail="Session not found")
    normalized_user_response = normalize_answer(input.user_response)
    normalized_correct_answer = normalize_answer(correct_answer)
    similarity = SequenceMatcher(None, normalized_user_response, normalized_correct_answer).ratio()
    similarity_threshold = 0.8
    if similarity >= similarity_threshold:
        result = "Correct"
        success = True
    else:
        result = f"Incorrect! Similarity: {similarity:.2f}"
        success = False
    # Sessions are single-use: remove the stored answer once it has been evaluated.
    delete_session(input.user_id, input.session_id)
    return {"success": success, "result": result, "correct_answer": correct_answer, "similarity": similarity}
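# Example exchange for the endpoint above (a sketch; values are placeholders):
#   POST /evaluate/   {"user_id": "u1", "session_id": "<id from /generate_exercise/>",
#                      "user_response": "How are you?"}
# Note that delete_session() runs on every evaluation, so repeating the request with
# the same session_id returns 404.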
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8006, ssl_keyfile="privkey.pem", ssl_certfile="fullchain.pem")