feat: HR Portal - Complete Multi-Tenant System with Redis Session Storage

Major Features:
-  Multi-tenant architecture (tenant isolation)
-  Employee CRUD with lifecycle management (onboarding/offboarding)
-  Department tree structure with email domain management
-  Company info management (single-record editing)
-  System functions CRUD (permission management)
-  Email account management (multi-account per employee)
-  Keycloak SSO integration (auth.lab.taipei)
-  Redis session storage (10.1.0.254:6379)
  - Solves Cookie 4KB limitation
  - Cross-system session sharing
  - Sliding expiration (8 hours)
  - Automatic token refresh

Technical Stack:
Backend:
- FastAPI + SQLAlchemy
- PostgreSQL 16 (10.1.0.20:5433)
- Keycloak Admin API integration
- Docker Mailserver integration (SSH)
- Alembic migrations

Frontend:
- Next.js 14 (App Router)
- NextAuth 4 with Keycloak Provider
- Redis session storage (ioredis)
- Tailwind CSS

Infrastructure:
- Redis 7 (10.1.0.254:6379) - Session + Cache
- Keycloak 26.1.0 (auth.lab.taipei)
- Docker Mailserver (10.1.0.254)

Architecture Highlights:
- Session管理由 Keycloak + Redis 統一控制
- 支援多系統 (HR/WebMail/Calendar/Drive/Office) 共享 session
- Token 自動刷新,異質服務整合
- 未來可無縫遷移到雲端

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-23 20:12:43 +08:00
commit 360533393f
386 changed files with 70353 additions and 0 deletions

View File

@@ -0,0 +1,4 @@
"""
批次作業模組
包含所有定時排程的批次處理任務
"""

View File

@@ -0,0 +1,160 @@
"""
審計日誌歸檔批次 (5.3)
執行時間: 每月 1 日 01:00
批次名稱: archive_audit_logs
將 90 天前的審計日誌匯出為 CSV並從主資料庫刪除
歸檔目錄: /mnt/nas/working/audit_logs/
"""
import csv
import logging
import os
from datetime import datetime, timedelta
from typing import Optional
from app.batch.base import log_batch_execution
logger = logging.getLogger(__name__)

# Keep the most recent 90 days in the main table; older rows are archived.
ARCHIVE_DAYS = 90
# NAS-mounted destination directory for the exported CSV archives.
ARCHIVE_BASE_DIR = "/mnt/nas/working/audit_logs"


def _get_archive_dir() -> str:
    """Return the archive directory path, creating it if it does not exist."""
    os.makedirs(ARCHIVE_BASE_DIR, exist_ok=True)
    return ARCHIVE_BASE_DIR
def run_archive_audit_logs(dry_run: bool = False) -> dict:
    """Archive audit logs older than ARCHIVE_DAYS to CSV, then purge them.

    Workflow:
      1. Select audit logs with ``performed_at`` before the cutoff date.
      2. Export them to ``archive_<YYYYMM>.csv`` (named after the cutoff
         month) in the archive directory — the CSV is written even in
         dry-run mode.
      3. Delete the archived rows from the main database (skipped when
         ``dry_run`` is True).
      4. Record the batch outcome via ``log_batch_execution``.

    Args:
        dry_run: When True, only export and report; no rows are deleted.

    Returns:
        On success: ``{"status": "success", "archived": n, "deleted": n,
        "csv_path": path}`` (or a message dict when nothing qualifies).
        On failure: ``{"status": "failed", "error": str}``.
    """
    started_at = datetime.utcnow()
    cutoff_date = datetime.utcnow() - timedelta(days=ARCHIVE_DAYS)
    logger.info(f"=== 開始審計日誌歸檔批次 === 截止日期: {cutoff_date.strftime('%Y-%m-%d')}")
    if dry_run:
        logger.info("[DRY RUN] 不會實際刪除資料")

    # Imported lazily so the module can be loaded without DB configuration.
    from app.db.session import get_db
    from app.models.audit_log import AuditLog

    db = next(get_db())
    try:
        # 1. Fetch every log row older than the cutoff, oldest first.
        old_logs = (
            db.query(AuditLog)
            .filter(AuditLog.performed_at < cutoff_date)
            .order_by(AuditLog.performed_at)
            .all()
        )
        total_count = len(old_logs)
        logger.info(f"找到 {total_count} 筆待歸檔日誌")
        if total_count == 0:
            message = f"無需歸檔 (截止日期 {cutoff_date.strftime('%Y-%m-%d')} 前無記錄)"
            log_batch_execution(
                batch_name="archive_audit_logs",
                status="success",
                message=message,
                started_at=started_at,
            )
            return {"status": "success", "archived": 0, "message": message}

        # 2. Export to a per-month CSV file.
        archive_month = cutoff_date.strftime("%Y%m")
        archive_dir = _get_archive_dir()
        csv_path = os.path.join(archive_dir, f"archive_{archive_month}.csv")
        fieldnames = [
            "id", "action", "resource_type", "resource_id",
            "performed_by", "ip_address",
            "details", "performed_at",
        ]
        logger.info(f"匯出至: {csv_path}")
        with open(csv_path, "w", newline="", encoding="utf-8") as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            for log in old_logs:
                # getattr with a default keeps the export resilient to
                # rows/models that may lack the optional columns.
                writer.writerow({
                    "id": log.id,
                    "action": log.action,
                    "resource_type": log.resource_type,
                    "resource_id": log.resource_id,
                    "performed_by": getattr(log, "performed_by", ""),
                    "ip_address": getattr(log, "ip_address", ""),
                    "details": str(getattr(log, "details", "")),
                    "performed_at": str(log.performed_at),
                })
        logger.info(f"已匯出 {total_count} 筆至 {csv_path}")

        # 3. Purge the archived rows (real run only). One bulk DELETE
        # replaces the original per-row db.delete() loop, which issued a
        # unit-of-work delete for every record.
        deleted_count = 0
        if not dry_run:
            deleted_count = (
                db.query(AuditLog)
                .filter(AuditLog.performed_at < cutoff_date)
                .delete(synchronize_session=False)
            )
            db.commit()
            logger.info(f"已刪除 {deleted_count} 筆舊日誌")
        else:
            logger.info(f"[DRY RUN] 將刪除 {total_count} 筆 (未實際執行)")

        # 4. Persist a batch-execution record.
        finished_at = datetime.utcnow()
        message = (
            f"歸檔 {total_count} 筆到 {csv_path}"
            + (f"; 已刪除 {deleted_count}" if not dry_run else " (DRY RUN)")
        )
        log_batch_execution(
            batch_name="archive_audit_logs",
            status="success",
            message=message,
            started_at=started_at,
            finished_at=finished_at,
        )
        logger.info(f"=== 審計日誌歸檔批次完成 === {message}")
        return {
            "status": "success",
            "archived": total_count,
            "deleted": deleted_count,
            "csv_path": csv_path,
        }
    except Exception as e:
        error_msg = f"審計日誌歸檔批次失敗: {str(e)}"
        # logger.exception keeps the traceback (logger.error dropped it).
        logger.exception(error_msg)
        try:
            db.rollback()
        except Exception:
            pass
        log_batch_execution(
            batch_name="archive_audit_logs",
            status="failed",
            message=error_msg,
            started_at=started_at,
        )
        return {"status": "failed", "error": str(e)}
    finally:
        db.close()
if __name__ == "__main__":
    # Allow running this batch directly as a script.
    import sys
    import argparse

    # Make the project root importable before the lazy app.* imports run.
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../.."))
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument("--dry-run", action="store_true", help="只統計不實際刪除")
    args = parser.parse_args()
    result = run_archive_audit_logs(dry_run=args.dry_run)
    print(f"執行結果: {result}")

59
backend/app/batch/base.py Normal file
View File

@@ -0,0 +1,59 @@
"""
批次作業基礎工具
提供 log_batch_execution 等共用函式
"""
import logging
from datetime import datetime
from typing import Optional
logger = logging.getLogger(__name__)
def log_batch_execution(
    batch_name: str,
    status: str,
    message: Optional[str] = None,
    started_at: Optional[datetime] = None,
    finished_at: Optional[datetime] = None,
) -> None:
    """Persist one batch-execution record to the BatchLog table.

    Best-effort: any database failure is logged and swallowed so that a
    bookkeeping problem never breaks the batch that called us.

    Args:
        batch_name: Batch identifier (e.g. "daily_quota_check").
        status: Outcome string (success/failed/warning).
        message: Optional human-readable summary.
        started_at: Start time; defaults to ``finished_at`` when omitted.
        finished_at: End time; defaults to now when omitted.
    """
    # Imported lazily so this module can be loaded without DB configuration.
    from app.db.session import get_db
    from app.models.batch_log import BatchLog

    now = datetime.utcnow()
    finished = finished_at or now
    started = started_at or finished
    # Both endpoints are always set by the defaults above, so the
    # duration is always computable (the old None check was dead code).
    duration = int((finished - started).total_seconds())

    db = None  # Fix: keep `db` bound even if get_db() itself raises.
    try:
        db = next(get_db())
        log_entry = BatchLog(
            batch_name=batch_name,
            status=status,
            message=message,
            started_at=started,
            finished_at=finished,
            duration_seconds=duration,
        )
        db.add(log_entry)
        db.commit()
        logger.info(f"[{batch_name}] 批次執行記錄已寫入: {status}")
    except Exception as e:
        logger.error(f"[{batch_name}] 寫入批次日誌失敗: {e}")
    finally:
        if db is not None:
            try:
                db.close()
            except Exception:
                pass

View File

@@ -0,0 +1,152 @@
"""
每日配額檢查批次 (5.1)
執行時間: 每日 02:00
批次名稱: daily_quota_check
檢查郵件和雲端硬碟配額使用情況,超過 80% 發送告警
"""
import logging
from datetime import datetime
from app.batch.base import log_batch_execution
logger = logging.getLogger(__name__)

# Send an alert once usage exceeds 80% of the quota.
QUOTA_ALERT_THRESHOLD = 0.8
# Intended alert recipient; not yet referenced in this module — TODO confirm wiring.
ALERT_EMAIL = "admin@porscheworld.tw"
def _send_alert_email(to: str, subject: str, body: str) -> bool:
    """Deliver a plain-text alert e-mail over SMTP.

    Uses the mail settings from app.core.config. Returns True when the
    message was handed to the server, False on any error; errors are
    logged as warnings and never raised to the caller.
    """
    try:
        import smtplib
        from email.mime.text import MIMEText
        from app.core.config import settings

        message = MIMEText(body, "plain", "utf-8")
        message["Subject"] = subject
        message["From"] = settings.MAIL_ADMIN_USER
        message["To"] = to

        with smtplib.SMTP(settings.MAIL_SERVER, settings.MAIL_PORT) as connection:
            if settings.MAIL_USE_TLS:
                connection.starttls()
            connection.login(settings.MAIL_ADMIN_USER, settings.MAIL_ADMIN_PASSWORD)
            connection.send_message(message)
        logger.info(f"告警郵件已發送至 {to}: {subject}")
        return True
    except Exception as err:
        logger.warning(f"發送告警郵件失敗: {err}")
        return False
def run_daily_quota_check() -> dict:
    """Run the daily mail/drive quota check.

    Iterates active e-mail accounts and network drives and counts how
    many were checked. Actual usage lookups are still TODO (the
    Mailserver and Drive Service APIs are not integrated yet), so no
    alerts are produced for now. The outcome is persisted via
    ``log_batch_execution``.

    Returns:
        ``{"status": "success", "summary": {...}}`` on success,
        ``{"status": "failed", "error": str}`` on failure.
    """
    started_at = datetime.utcnow()
    summary = {
        "email_checked": 0,
        "email_alerts": 0,
        "drive_checked": 0,
        "drive_alerts": 0,
    }
    logger.info("=== 開始每日配額檢查批次 ===")

    # Imported lazily so this module can be loaded without DB configuration.
    from app.db.session import get_db
    from app.models.email_account import EmailAccount
    from app.models.network_drive import NetworkDrive

    db = next(get_db())
    try:
        # 1. Mail quota: count active accounts. Real usage lookup is
        #    pending Mailserver API integration.
        logger.info("檢查郵件配額使用情況...")
        email_accounts = db.query(EmailAccount).filter(
            EmailAccount.is_active.is_(True)
        ).all()
        for account in email_accounts:
            summary["email_checked"] += 1
            # TODO: once the Mailserver API is integrated:
            # usage_mb = mailserver_service.get_usage(account.email_address)
            # if usage_mb and usage_mb / account.quota_mb > QUOTA_ALERT_THRESHOLD:
            #     _send_alert_email(...)
        logger.info(f"郵件帳號檢查完成: {summary['email_checked']} 個帳號")

        # 2. Drive quota: count active drives. Real usage lookup is
        #    pending Drive Service availability.
        logger.info("檢查雲端硬碟配額使用情況...")
        network_drives = db.query(NetworkDrive).filter(
            NetworkDrive.is_active.is_(True)
        ).all()
        from app.services.drive_service import get_drive_service_client

        # Instantiated now so a misconfigured client fails the batch early;
        # it will perform the actual quota queries once the service is live.
        drive_client = get_drive_service_client()
        for drive in network_drives:
            summary["drive_checked"] += 1
            # NOTE: drive.id is the database primary key; the Drive
            # Service needs drive_user_id. Query skipped until the
            # service is online.
        logger.info(f"雲端硬碟檢查完成: {summary['drive_checked']} 個帳號")

        # 3. Persist the batch-execution record.
        finished_at = datetime.utcnow()
        message = (
            f"郵件帳號: {summary['email_checked']} 個, 告警: {summary['email_alerts']} 個; "
            f"雲端硬碟: {summary['drive_checked']} 個, 告警: {summary['drive_alerts']}"
        )
        log_batch_execution(
            batch_name="daily_quota_check",
            status="success",
            message=message,
            started_at=started_at,
            finished_at=finished_at,
        )
        logger.info(f"=== 每日配額檢查批次完成 === {message}")
        return {"status": "success", "summary": summary}
    except Exception as e:
        error_msg = f"每日配額檢查批次失敗: {str(e)}"
        # logger.exception keeps the traceback (logger.error dropped it).
        logger.exception(error_msg)
        log_batch_execution(
            batch_name="daily_quota_check",
            status="failed",
            message=error_msg,
            started_at=started_at,
        )
        return {"status": "failed", "error": str(e)}
    finally:
        db.close()
if __name__ == "__main__":
    import sys
    import os

    # Allow running this batch directly as a script: make the project
    # root importable before the lazy app.* imports run.
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../.."))
    logging.basicConfig(level=logging.INFO)
    result = run_daily_quota_check()
    print(f"執行結果: {result}")

View File

@@ -0,0 +1,103 @@
"""
批次作業排程器 (5.4)
使用 schedule 套件管理所有批次排程
排程清單:
- 每日 00:00 - auto_terminate_employees (未來實作)
- 每日 02:00 - daily_quota_check
- 每日 03:00 - sync_keycloak_users
- 每月 1 日 01:00 - archive_audit_logs
啟動方式:
python -m app.batch.scheduler
"""
import logging
import signal
import sys
import time
from datetime import datetime
logger = logging.getLogger(__name__)
def _setup_logging():
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
def _run_daily_quota_check():
    """Scheduler hook: run the daily quota check; log, never propagate."""
    logger.info("觸發: 每日配額檢查批次")
    try:
        from app.batch.daily_quota_check import run_daily_quota_check

        outcome = run_daily_quota_check()
        logger.info(f"每日配額檢查批次完成: {outcome.get('status')}")
    except Exception as err:
        logger.error(f"每日配額檢查批次異常: {err}")
def _run_sync_keycloak_users():
    """Scheduler hook: run the Keycloak sync; log, never propagate."""
    logger.info("觸發: Keycloak 同步批次")
    try:
        from app.batch.sync_keycloak_users import run_sync_keycloak_users

        outcome = run_sync_keycloak_users()
        logger.info(f"Keycloak 同步批次完成: {outcome.get('status')}")
    except Exception as err:
        logger.error(f"Keycloak 同步批次異常: {err}")
def _run_archive_audit_logs():
    """Scheduler hook: archive audit logs, but only on the 1st of the month."""
    # The job is registered daily at 01:00; this guard makes it a no-op
    # on every day except the first of the month.
    if datetime.now().day != 1:
        return
    logger.info("觸發: 審計日誌歸檔批次 (每月 1 日)")
    try:
        from app.batch.archive_audit_logs import run_archive_audit_logs

        outcome = run_archive_audit_logs()
        logger.info(f"審計日誌歸檔批次完成: {outcome.get('status')}")
    except Exception as err:
        logger.error(f"審計日誌歸檔批次異常: {err}")
def start_scheduler():
    """Start the blocking batch-scheduler loop.

    Registers three daily jobs with the `schedule` package, then polls
    once a minute forever. Exits via sys.exit(1) when `schedule` is
    missing, and via sys.exit(0) on SIGTERM (Docker stop).
    """
    try:
        import schedule
    except ImportError:
        logger.error("缺少 schedule 套件,請執行: pip install schedule")
        sys.exit(1)
    logger.info("=== HR Portal 批次排程器啟動 ===")
    # Daily 02:00 - quota check
    schedule.every().day.at("02:00").do(_run_daily_quota_check)
    # Daily 03:00 - Keycloak sync
    schedule.every().day.at("03:00").do(_run_sync_keycloak_users)
    # Daily 01:00 - audit-log archiving (the job itself is a no-op
    # except on the 1st of the month)
    schedule.every().day.at("01:00").do(_run_archive_audit_logs)
    logger.info("排程設定完成:")
    logger.info(" 02:00 - 每日配額檢查")
    logger.info(" 03:00 - Keycloak 同步")
    logger.info(" 01:00 - 審計日誌歸檔 (每月 1 日)")
    # Handle SIGTERM (Docker stop signal) for a clean shutdown.
    def handle_sigterm(signum, frame):
        logger.info("收到停止信號,排程器正在關閉...")
        sys.exit(0)
    signal.signal(signal.SIGTERM, handle_sigterm)
    logger.info("排程器運行中,等待任務觸發...")
    while True:
        schedule.run_pending()
        time.sleep(60)  # poll once per minute
if __name__ == "__main__":
    # Entry point: python -m app.batch.scheduler
    _setup_logging()
    start_scheduler()

View File

@@ -0,0 +1,146 @@
"""
Keycloak 同步批次 (5.2)
執行時間: 每日 03:00
批次名稱: sync_keycloak_users
同步 Keycloak 使用者狀態到 HR Portal
以 HR Portal 為準 (Single Source of Truth)
"""
import logging
from datetime import datetime
from app.batch.base import log_batch_execution
logger = logging.getLogger(__name__)
def run_sync_keycloak_users() -> dict:
    """Sync employee enabled/disabled state from HR Portal to Keycloak.

    HR Portal is the single source of truth:
      - status == "active" -> Keycloak ``enabled = True``
      - any other status   -> Keycloak ``enabled = False``

    Employees without a ``username_base`` (onboarding not run yet) and
    usernames absent from Keycloak are counted and skipped. The outcome
    is persisted via ``log_batch_execution``; the batch status is
    "failed" when any per-employee error occurred.

    Returns:
        ``{"status": ..., "summary": {...}}`` on completion,
        ``{"status": "failed", "error": str}`` on an unexpected failure.
    """
    started_at = datetime.utcnow()
    summary = {
        "total_checked": 0,
        "synced": 0,
        "not_found_in_keycloak": 0,
        # NOTE: despite the name, this counts employees whose
        # username_base is empty (no lookup key for Keycloak), not a
        # missing Keycloak ID. Key name kept for backward compatibility.
        "no_keycloak_id": 0,
        "errors": 0,
    }
    issues = []
    logger.info("=== 開始 Keycloak 同步批次 ===")

    # Imported lazily so this module loads without DB/Keycloak configuration.
    from app.db.session import get_db
    from app.models.employee import Employee
    from app.services.keycloak_admin_client import get_keycloak_admin_client

    db = next(get_db())
    try:
        # 1. Load every employee; Keycloak is queried per username below.
        employees = db.query(Employee).all()
        keycloak_client = get_keycloak_admin_client()
        logger.info(f"{len(employees)} 位員工待檢查")
        for emp in employees:
            summary["total_checked"] += 1
            # Skip employees with no username_base (onboarding not done).
            username = emp.username_base
            if not username:
                summary["no_keycloak_id"] += 1
                continue
            try:
                # 2. Look up the Keycloak user by username.
                kc_user = keycloak_client.get_user_by_username(username)
                if not kc_user:
                    # Not provisioned in Keycloak yet; nothing to sync.
                    summary["not_found_in_keycloak"] += 1
                    logger.debug(f"員工 {emp.employee_id} ({username}) 在 Keycloak 中不存在,跳過")
                    continue
                kc_user_id = kc_user.get("id")
                kc_enabled = kc_user.get("enabled", False)
                # 3. Derive the desired enabled flag from the HR status.
                should_be_enabled = (emp.status == "active")
                # 4. Push HR Portal's state to Keycloak when they differ.
                if kc_enabled != should_be_enabled:
                    success = keycloak_client.update_user(
                        kc_user_id, {"enabled": should_be_enabled}
                    )
                    if success:
                        summary["synced"] += 1
                        # Fix: the old->new values were concatenated with
                        # no separator in the original log line.
                        logger.info(
                            f"✓ 同步 {emp.employee_id} ({username}): "
                            f"Keycloak enabled {kc_enabled}{should_be_enabled} "
                            f"(HR 狀態: {emp.status})"
                        )
                    else:
                        summary["errors"] += 1
                        issues.append(f"{emp.employee_id}: 同步失敗")
                        logger.warning(f"✗ 同步 {emp.employee_id} ({username}) 失敗")
            except Exception as e:
                summary["errors"] += 1
                issues.append(f"{emp.employee_id}: {str(e)}")
                logger.error(f"處理員工 {emp.employee_id} 時發生錯誤: {e}")
        # 5. Persist the batch-execution record.
        finished_at = datetime.utcnow()
        message = (
            f"檢查: {summary['total_checked']}, "
            f"同步: {summary['synced']}, "
            f"Keycloak 無帳號: {summary['not_found_in_keycloak']}, "
            f"錯誤: {summary['errors']}"
        )
        if issues:
            message += f"\n問題清單: {'; '.join(issues[:10])}"
            if len(issues) > 10:
                message += f" ... 共 {len(issues)} 個問題"
        status = "failed" if summary["errors"] > 0 else "success"
        log_batch_execution(
            batch_name="sync_keycloak_users",
            status=status,
            message=message,
            started_at=started_at,
            finished_at=finished_at,
        )
        logger.info(f"=== Keycloak 同步批次完成 === {message}")
        return {"status": status, "summary": summary}
    except Exception as e:
        error_msg = f"Keycloak 同步批次失敗: {str(e)}"
        # logger.exception keeps the traceback (logger.error dropped it).
        logger.exception(error_msg)
        log_batch_execution(
            batch_name="sync_keycloak_users",
            status="failed",
            message=error_msg,
            started_at=started_at,
        )
        return {"status": "failed", "error": str(e)}
    finally:
        db.close()
if __name__ == "__main__":
    import sys
    import os

    # Allow running this batch directly as a script: make the project
    # root importable before the lazy app.* imports run.
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../.."))
    logging.basicConfig(level=logging.INFO)
    result = run_sync_keycloak_users()
    print(f"執行結果: {result}")