feat: implement database-driven API configuration management and reorganize project structure

## New features
- Admin-panel API configuration management (OpenAI/Anthropic/Qwen)
- API configuration is stored in the database and takes effect immediately, no restart required
- API keys are displayed masked (first 10 + last 4 characters)
- The full endpoint URL is shown automatically

## Backend improvements
- Add config_service.py to load configuration from the database (a hedged sketch follows this list)
- LLMService accepts injected configuration at runtime, falling back to environment variables
- Update exam.py and question.py to use the database-backed configuration
- Extend schemas.py to cover all API configuration fields
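
Since config_service.py itself is not part of the excerpt below, here is a minimal sketch of what `load_llm_config` could look like, assuming the `SystemConfig` key/value model used in `get_system_config` and hypothetical environment-variable names; the committed implementation may differ:

```python
# Hypothetical sketch of services/config_service.py -- not the committed code.
import os

from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from models import SystemConfig  # assumed import path


async def load_llm_config(db: AsyncSession) -> dict:
    """Load LLM settings from the database, falling back to environment variables."""
    result = await db.execute(select(SystemConfig))
    configs = {c.key: c.value for c in result.scalars().all()}

    def pick(key: str, env_var: str, default: str | None = None) -> str | None:
        # A database value wins; otherwise fall back to the environment, then the default.
        return configs.get(key) or os.getenv(env_var, default)

    return {
        "provider": pick("ai_provider", "AI_PROVIDER", "openai"),
        "openai_api_key": pick("openai_api_key", "OPENAI_API_KEY"),
        "openai_base_url": pick("openai_base_url", "OPENAI_BASE_URL", "https://api.openai.com/v1"),
        "openai_model": pick("openai_model", "OPENAI_MODEL", "gpt-4o-mini"),
        "anthropic_api_key": pick("anthropic_api_key", "ANTHROPIC_API_KEY"),
        "anthropic_model": pick("anthropic_model", "ANTHROPIC_MODEL", "claude-3-haiku-20240307"),
        "qwen_api_key": pick("qwen_api_key", "QWEN_API_KEY"),
        "qwen_base_url": pick("qwen_base_url", "QWEN_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
        "qwen_model": pick("qwen_model", "QWEN_MODEL", "qwen-plus"),
    }
```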

## Frontend improvements
- Rewrite AdminSettings.jsx with an improved UI
- Show/hide toggle for API keys
- Visual indicator for the currently active provider
- Remove the misleading "restart required" notice

## Project restructuring
- Move all scripts into the scripts/ directory
- Move all documentation into the docs/ directory
- Clean up Python cache files

🤖 Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-01 19:24:12 +08:00
parent 0ea8e5aa1e
commit a01f3540c5
47 changed files with 1051 additions and 129 deletions

View File

@@ -24,11 +24,26 @@ async def get_system_config(
     result = await db.execute(select(SystemConfig))
     configs = {config.key: config.value for config in result.scalars().all()}
 
+    # Mask API keys (show only first 10 and last 4 characters)
+    def mask_api_key(key):
+        if not key or len(key) < 20:
+            return key
+        return f"{key[:10]}...{key[-4:]}"
+
     return {
         "allow_registration": configs.get("allow_registration", "true").lower() == "true",
         "max_upload_size_mb": int(configs.get("max_upload_size_mb", "10")),
         "max_daily_uploads": int(configs.get("max_daily_uploads", "20")),
-        "ai_provider": configs.get("ai_provider", "openai")
+        "ai_provider": configs.get("ai_provider", "openai"),
+        # API Configuration
+        "openai_api_key": mask_api_key(configs.get("openai_api_key")),
+        "openai_base_url": configs.get("openai_base_url", "https://api.openai.com/v1"),
+        "openai_model": configs.get("openai_model", "gpt-4o-mini"),
+        "anthropic_api_key": mask_api_key(configs.get("anthropic_api_key")),
+        "anthropic_model": configs.get("anthropic_model", "claude-3-haiku-20240307"),
+        "qwen_api_key": mask_api_key(configs.get("qwen_api_key")),
+        "qwen_base_url": configs.get("qwen_base_url", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
+        "qwen_model": configs.get("qwen_model", "qwen-plus")
     }
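
For example, a made-up 38-character key is reduced to its first 10 and last 4 characters; keys shorter than 20 characters are returned unchanged so short placeholders stay readable:

```python
# Purely illustrative -- the key below is fabricated.
key = "sk-proj-abcdefghijklmnopqrstuvwxyz1234"
masked = f"{key[:10]}...{key[-4:]}"  # -> "sk-proj-ab...1234"
```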

View File

@@ -83,9 +83,12 @@ async def login(
     # Create access token
     access_token = create_access_token(
-        data={"sub": user.id}
+        data={"sub": str(user.id)} # JWT 'sub' must be a string
     )
 
+    print(f"✅ Login successful: user={user.username}, id={user.id}")
+    print(f"🔑 Generated token (first 50 chars): {access_token[:50]}...")
+
     return {
         "access_token": access_token,
         "token_type": "bearer"

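The `str()` cast matters because RFC 7519 defines the `sub` claim as a StringOrURI, and some JWT libraries reject integer subjects when decoding. A minimal illustration using PyJWT; the secret, algorithm, and claim layout are placeholders, and the project's auth_service may use a different library:

```python
# Illustrative only -- SECRET_KEY and the HS256 algorithm are assumptions.
import jwt  # PyJWT

SECRET_KEY = "change-me"

token = jwt.encode({"sub": str(42)}, SECRET_KEY, algorithm="HS256")
payload = jwt.decode(token, SECRET_KEY, algorithms=["HS256"])
user_id = int(payload["sub"])  # cast back to the integer primary key when loading the user
```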
View File

@@ -17,7 +17,8 @@ from schemas import (
 )
 from services.auth_service import get_current_user
 from services.document_parser import document_parser
-from services.llm_service import llm_service
+from services.llm_service import LLMService
+from services.config_service import load_llm_config
 from utils import is_allowed_file, calculate_content_hash
 
 router = APIRouter()
@@ -151,6 +152,10 @@ async def async_parse_and_save(
         if not text_content or len(text_content.strip()) < 10:
             raise Exception("Document appears to be empty or too short")
 
+        # Load LLM configuration from database
+        llm_config = await load_llm_config(db)
+        llm_service = LLMService(config=llm_config)
+
         # Parse questions using LLM
         print(f"[Exam {exam_id}] Calling LLM to extract questions...")
         questions_data = await llm_service.parse_document(text_content)
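
Because a fresh LLMService is constructed per request from the latest database values, configuration changes take effect immediately without restarting the backend. The constructor below is a hypothetical sketch of the dynamic-injection-with-environment-fallback behaviour described in the commit message; the committed llm_service.py may be structured differently:

```python
# Hypothetical sketch of the LLMService constructor -- attribute names are assumptions.
import os


class LLMService:
    def __init__(self, config: dict | None = None):
        config = config or {}
        # Values injected from the database win; environment variables are the fallback.
        self.provider = config.get("provider") or os.getenv("AI_PROVIDER", "openai")
        self.openai_api_key = config.get("openai_api_key") or os.getenv("OPENAI_API_KEY")
        self.openai_base_url = (
            config.get("openai_base_url")
            or os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
        )
        self.openai_model = config.get("openai_model") or os.getenv("OPENAI_MODEL", "gpt-4o-mini")
        # ...the anthropic_* and qwen_* settings follow the same pattern.
```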

View File

@@ -13,7 +13,8 @@ from schemas import (
     AnswerSubmit, AnswerCheckResponse
 )
 from services.auth_service import get_current_user
-from services.llm_service import llm_service
+from services.llm_service import LLMService
+from services.config_service import load_llm_config
 
 router = APIRouter()
@@ -177,6 +178,10 @@ async def check_answer(
     # Check answer based on question type
     if question.type == QuestionType.SHORT:
+        # Load LLM configuration from database
+        llm_config = await load_llm_config(db)
+        llm_service = LLMService(config=llm_config)
+
         # Use AI to grade short answer
         grading = await llm_service.grade_short_answer(
             question.content,