mirror of
https://github.com/handsomezhuzhu/QQuiz.git
synced 2026-02-20 12:00:14 +00:00
feat: 实现数据库驱动的API配置管理和项目结构重组
## 新功能 - 实现管理后台API配置管理(OpenAI/Anthropic/Qwen) - API配置保存到数据库,实时生效无需重启 - API密钥遮罩显示(前10位+后4位) - 完整endpoint URL自动显示 ## 后端改进 - 新增 config_service.py 用于加载数据库配置 - LLMService 支持动态配置注入,回退到环境变量 - 更新 exam.py 和 question.py 使用数据库配置 - 扩展 schemas.py 支持所有API配置字段 ## 前端改进 - 重写 AdminSettings.jsx 增强UI体验 - API密钥显示/隐藏切换 - 当前使用的提供商可视化标识 - 移除"需要重启"的误导性提示 ## 项目结构重组 - 移动所有脚本到 scripts/ 目录 - 移动所有文档到 docs/ 目录 - 清理 Python 缓存文件 🤖 Generated with Claude Code Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -1,13 +1,15 @@
|
||||
fastapi==0.109.0
|
||||
uvicorn[standard]==0.27.0
|
||||
sqlalchemy==2.0.25
|
||||
asyncpg==0.29.0
|
||||
aiomysql==0.2.0
|
||||
pymysql==1.1.0
|
||||
alembic==1.13.1
|
||||
pydantic==2.5.3
|
||||
pydantic-settings==2.1.0
|
||||
python-dotenv==1.0.0
|
||||
python-multipart==0.0.6
|
||||
passlib[bcrypt]==1.7.4
|
||||
passlib==1.7.4
|
||||
bcrypt==4.0.1
|
||||
python-jose[cryptography]==3.3.0
|
||||
aiofiles==23.2.1
|
||||
httpx==0.26.0
|
||||
|
||||
@@ -24,11 +24,26 @@ async def get_system_config(
|
||||
result = await db.execute(select(SystemConfig))
|
||||
configs = {config.key: config.value for config in result.scalars().all()}
|
||||
|
||||
# Mask API keys (show only first 10 and last 4 characters)
|
||||
def mask_api_key(key):
    """Mask an API key for safe display in the admin UI.

    Returns the key as ``"<first 10>...<last 4>"`` so an admin can
    recognize which key is configured without seeing all of it.

    Args:
        key: The stored API key, or None/"" when not configured.

    Returns:
        None/"" unchanged when no key is set, "****" for keys too short
        to partially reveal safely, otherwise the masked form.
    """
    if not key:
        # Nothing configured — preserve None/"" so the caller can tell.
        return key
    if len(key) < 20:
        # A short key would be mostly revealed by first-10 + last-4,
        # so mask it completely instead of returning it in plaintext.
        return "****"
    return f"{key[:10]}...{key[-4:]}"
|
||||
|
||||
return {
|
||||
"allow_registration": configs.get("allow_registration", "true").lower() == "true",
|
||||
"max_upload_size_mb": int(configs.get("max_upload_size_mb", "10")),
|
||||
"max_daily_uploads": int(configs.get("max_daily_uploads", "20")),
|
||||
"ai_provider": configs.get("ai_provider", "openai")
|
||||
"ai_provider": configs.get("ai_provider", "openai"),
|
||||
# API Configuration
|
||||
"openai_api_key": mask_api_key(configs.get("openai_api_key")),
|
||||
"openai_base_url": configs.get("openai_base_url", "https://api.openai.com/v1"),
|
||||
"openai_model": configs.get("openai_model", "gpt-4o-mini"),
|
||||
"anthropic_api_key": mask_api_key(configs.get("anthropic_api_key")),
|
||||
"anthropic_model": configs.get("anthropic_model", "claude-3-haiku-20240307"),
|
||||
"qwen_api_key": mask_api_key(configs.get("qwen_api_key")),
|
||||
"qwen_base_url": configs.get("qwen_base_url", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
|
||||
"qwen_model": configs.get("qwen_model", "qwen-plus")
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -83,9 +83,12 @@ async def login(
|
||||
|
||||
# Create access token
|
||||
access_token = create_access_token(
|
||||
data={"sub": user.id}
|
||||
data={"sub": str(user.id)} # JWT 'sub' must be a string
|
||||
)
|
||||
|
||||
print(f"✅ Login successful: user={user.username}, id={user.id}")
|
||||
print(f"🔑 Generated token (first 50 chars): {access_token[:50]}...")
|
||||
|
||||
return {
|
||||
"access_token": access_token,
|
||||
"token_type": "bearer"
|
||||
|
||||
@@ -17,7 +17,8 @@ from schemas import (
|
||||
)
|
||||
from services.auth_service import get_current_user
|
||||
from services.document_parser import document_parser
|
||||
from services.llm_service import llm_service
|
||||
from services.llm_service import LLMService
|
||||
from services.config_service import load_llm_config
|
||||
from utils import is_allowed_file, calculate_content_hash
|
||||
|
||||
router = APIRouter()
|
||||
@@ -151,6 +152,10 @@ async def async_parse_and_save(
|
||||
if not text_content or len(text_content.strip()) < 10:
|
||||
raise Exception("Document appears to be empty or too short")
|
||||
|
||||
# Load LLM configuration from database
|
||||
llm_config = await load_llm_config(db)
|
||||
llm_service = LLMService(config=llm_config)
|
||||
|
||||
# Parse questions using LLM
|
||||
print(f"[Exam {exam_id}] Calling LLM to extract questions...")
|
||||
questions_data = await llm_service.parse_document(text_content)
|
||||
|
||||
@@ -13,7 +13,8 @@ from schemas import (
|
||||
AnswerSubmit, AnswerCheckResponse
|
||||
)
|
||||
from services.auth_service import get_current_user
|
||||
from services.llm_service import llm_service
|
||||
from services.llm_service import LLMService
|
||||
from services.config_service import load_llm_config
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@@ -177,6 +178,10 @@ async def check_answer(
|
||||
|
||||
# Check answer based on question type
|
||||
if question.type == QuestionType.SHORT:
|
||||
# Load LLM configuration from database
|
||||
llm_config = await load_llm_config(db)
|
||||
llm_service = LLMService(config=llm_config)
|
||||
|
||||
# Use AI to grade short answer
|
||||
grading = await llm_service.grade_short_answer(
|
||||
question.content,
|
||||
|
||||
@@ -45,6 +45,15 @@ class SystemConfigUpdate(BaseModel):
|
||||
max_upload_size_mb: Optional[int] = None
|
||||
max_daily_uploads: Optional[int] = None
|
||||
ai_provider: Optional[str] = None
|
||||
# API Configuration
|
||||
openai_api_key: Optional[str] = None
|
||||
openai_base_url: Optional[str] = None
|
||||
openai_model: Optional[str] = None
|
||||
anthropic_api_key: Optional[str] = None
|
||||
anthropic_model: Optional[str] = None
|
||||
qwen_api_key: Optional[str] = None
|
||||
qwen_base_url: Optional[str] = None
|
||||
qwen_model: Optional[str] = None
|
||||
|
||||
|
||||
class SystemConfigResponse(BaseModel):
|
||||
@@ -52,6 +61,15 @@ class SystemConfigResponse(BaseModel):
|
||||
max_upload_size_mb: int
|
||||
max_daily_uploads: int
|
||||
ai_provider: str
|
||||
# API Configuration
|
||||
openai_api_key: Optional[str] = None
|
||||
openai_base_url: Optional[str] = None
|
||||
openai_model: Optional[str] = None
|
||||
anthropic_api_key: Optional[str] = None
|
||||
anthropic_model: Optional[str] = None
|
||||
qwen_api_key: Optional[str] = None
|
||||
qwen_base_url: Optional[str] = None
|
||||
qwen_model: Optional[str] = None
|
||||
|
||||
|
||||
# ============ Exam Schemas ============
|
||||
|
||||
@@ -28,13 +28,24 @@ async def get_current_user(
|
||||
headers={"WWW-Authenticate": "Bearer"},
|
||||
)
|
||||
|
||||
print(f"🔍 Received token (first 50 chars): {token[:50] if token else 'None'}...")
|
||||
|
||||
# Decode token
|
||||
payload = decode_access_token(token)
|
||||
if payload is None:
|
||||
print(f"❌ Token decode failed - Invalid or expired token")
|
||||
raise credentials_exception
|
||||
|
||||
user_id: int = payload.get("sub")
|
||||
user_id = payload.get("sub")
|
||||
if user_id is None:
|
||||
print(f"❌ No 'sub' in payload: {payload}")
|
||||
raise credentials_exception
|
||||
|
||||
# Convert user_id to int if it's a string
|
||||
try:
|
||||
user_id = int(user_id)
|
||||
except (ValueError, TypeError):
|
||||
print(f"❌ Invalid user_id format: {user_id}")
|
||||
raise credentials_exception
|
||||
|
||||
# Get user from database
|
||||
@@ -42,8 +53,10 @@ async def get_current_user(
|
||||
user = result.scalar_one_or_none()
|
||||
|
||||
if user is None:
|
||||
print(f"❌ User not found with id: {user_id}")
|
||||
raise credentials_exception
|
||||
|
||||
print(f"✅ User authenticated: {user.username} (id={user.id})")
|
||||
return user
|
||||
|
||||
|
||||
|
||||
43
backend/services/config_service.py
Normal file
43
backend/services/config_service.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
Configuration Service - Load system configuration from database
|
||||
"""
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select
|
||||
from typing import Dict
|
||||
from models import SystemConfig
|
||||
|
||||
|
||||
async def load_llm_config(db: AsyncSession) -> Dict[str, str]:
    """Read all LLM-related settings from the SystemConfig table.

    Any key missing from the database falls back to the provider's
    built-in default (API keys default to None).

    Returns:
        Dict with 'ai_provider' plus the per-provider key/base_url/model
        entries for OpenAI, Anthropic and Qwen, e.g.::

            {
                'ai_provider': 'openai',
                'openai_api_key': 'sk-...',
                'openai_base_url': 'https://api.openai.com/v1',
                'openai_model': 'gpt-4o-mini',
                ...
            }
    """
    rows = await db.execute(select(SystemConfig))
    stored = {row.key: row.value for row in rows.scalars().all()}

    # One place to see every setting and its fallback value.
    defaults = {
        'ai_provider': 'openai',
        # OpenAI
        'openai_api_key': None,
        'openai_base_url': 'https://api.openai.com/v1',
        'openai_model': 'gpt-4o-mini',
        # Anthropic
        'anthropic_api_key': None,
        'anthropic_model': 'claude-3-haiku-20240307',
        # Qwen
        'qwen_api_key': None,
        'qwen_base_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
        'qwen_model': 'qwen-plus',
    }

    return {name: stored.get(name, fallback) for name, fallback in defaults.items()}
|
||||
@@ -15,28 +15,53 @@ from utils import calculate_content_hash
|
||||
class LLMService:
|
||||
"""Service for interacting with various LLM providers"""
|
||||
|
||||
def __init__(self, config: Optional[Dict[str, str]] = None):
    """
    Initialize the LLM client for the configured provider.

    The visible span interleaves pre- and post-change diff lines (two
    __init__ signatures, duplicate client constructions); this is the
    coherent database-config version: each setting is taken from
    `config` first, then the matching environment variable, then a
    hard-coded default.

    Args:
        config: Optional dict with keys such as 'ai_provider',
            'openai_api_key', 'openai_base_url', 'openai_model',
            'anthropic_api_key', 'anthropic_model', 'qwen_api_key',
            'qwen_base_url', 'qwen_model'.

    Raises:
        ValueError: if the selected provider's API key is missing, or
            the provider name is not 'openai'/'anthropic'/'qwen'.
    """
    cfg = config or {}
    # Provider precedence: explicit config > AI_PROVIDER env var > 'openai'.
    self.provider = cfg.get("ai_provider") or os.getenv("AI_PROVIDER", "openai")

    if self.provider == "openai":
        api_key = cfg.get("openai_api_key") or os.getenv("OPENAI_API_KEY")
        base_url = cfg.get("openai_base_url") or os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
        self.model = cfg.get("openai_model") or os.getenv("OPENAI_MODEL", "gpt-4o-mini")

        if not api_key:
            raise ValueError("OpenAI API key not configured")

        self.client = AsyncOpenAI(
            api_key=api_key,
            base_url=base_url
        )

    elif self.provider == "anthropic":
        api_key = cfg.get("anthropic_api_key") or os.getenv("ANTHROPIC_API_KEY")
        self.model = cfg.get("anthropic_model") or os.getenv("ANTHROPIC_MODEL", "claude-3-haiku-20240307")

        if not api_key:
            raise ValueError("Anthropic API key not configured")

        self.client = AsyncAnthropic(
            api_key=api_key
        )

    elif self.provider == "qwen":
        # Qwen exposes an OpenAI-compatible endpoint, so reuse AsyncOpenAI.
        api_key = cfg.get("qwen_api_key") or os.getenv("QWEN_API_KEY")
        base_url = cfg.get("qwen_base_url") or os.getenv("QWEN_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")
        self.model = cfg.get("qwen_model") or os.getenv("QWEN_MODEL", "qwen-plus")

        if not api_key:
            raise ValueError("Qwen API key not configured")

        self.client = AsyncOpenAI(
            api_key=api_key,
            base_url=base_url
        )

    else:
        raise ValueError(f"Unsupported AI provider: {self.provider}")
|
||||
|
||||
@@ -37,6 +37,8 @@ def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -
|
||||
expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
|
||||
|
||||
to_encode.update({"exp": expire})
|
||||
print(f"🔑 Creating token with SECRET_KEY (first 20 chars): {SECRET_KEY[:20]}...")
|
||||
print(f"📦 Token payload: {to_encode}")
|
||||
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
|
||||
return encoded_jwt
|
||||
|
||||
@@ -44,9 +46,12 @@ def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -
|
||||
def decode_access_token(token: str) -> Optional[dict]:
    """Decode and validate a JWT access token.

    Args:
        token: The encoded JWT string from the Authorization header.

    Returns:
        The decoded payload dict, or None when the token is invalid,
        malformed, or expired.
    """
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        return payload
    except JWTError as e:
        # Log only the failure class/message for debugging.
        # SECURITY: never print SECRET_KEY or the decoded payload —
        # the previous debug prints leaked the signing secret to logs.
        print(f"❌ JWT decode error: {type(e).__name__}: {str(e)}")
        return None
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user