diff --git a/be/app/api/v1/endpoints/chores.py b/be/app/api/v1/endpoints/chores.py index 8e77254..cd1653d 100644 --- a/be/app/api/v1/endpoints/chores.py +++ b/be/app/api/v1/endpoints/chores.py @@ -19,11 +19,20 @@ from app.schemas.time_entry import TimeEntryPublic from app.crud import chore as crud_chore from app.crud import history as crud_history from app.crud import group as crud_group -from app.core.exceptions import ChoreNotFoundError, PermissionDeniedError, GroupNotFoundError, DatabaseIntegrityError +from app.core.exceptions import ChoreNotFoundError, PermissionDeniedError, GroupNotFoundError, DatabaseIntegrityError, GroupMembershipError, GroupPermissionError logger = logging.getLogger(__name__) router = APIRouter() +# --- Remove legacy duplicate chore endpoints (personal/* and groups/*/chores/*) --- +_UNSUPPORTED_CHORE_PATHS = { + "/personal", + "/personal/{chore_id}", + "/groups/{group_id}/chores", + "/groups/{group_id}/chores/{chore_id}", +} +router.routes = [r for r in router.routes if getattr(r, "path", None) not in _UNSUPPORTED_CHORE_PATHS] + @router.get( "/all", response_model=PyList[ChorePublic], @@ -688,4 +697,90 @@ async def stop_time_entry( await db.commit() await db.refresh(time_entry) - return time_entry \ No newline at end of file + return time_entry + +@router.post( + "", + response_model=ChorePublic, + status_code=status.HTTP_201_CREATED, + summary="Create Chore (Any Type)", + tags=["Chores"], +) +async def create_chore_any_type( + chore_in: ChoreCreate, + db: AsyncSession = Depends(get_transactional_session), + current_user: UserModel = Depends(current_active_user), +): + """Create either a personal or group chore using a single endpoint.""" + logger.info(f"User {current_user.email} creating chore (type={chore_in.type}) name={chore_in.name}") + + # Basic permission & validation + if chore_in.type == ChoreTypeEnum.personal: + if chore_in.group_id is not None: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="group_id must 
be null for personal chores") + elif chore_in.type == ChoreTypeEnum.group: + if chore_in.group_id is None: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="group_id is required for group chores") + # ensure membership + try: + await crud_group.check_group_membership(db, group_id=chore_in.group_id, user_id=current_user.id, action="create chores for") + except (GroupMembershipError, GroupNotFoundError) as e: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=str(e)) + else: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid chore type") + + try: + created = await crud_chore.create_chore(db=db, chore_in=chore_in, user_id=current_user.id) + return created + except Exception as e: + logger.error(f"Error creating chore: {e}", exc_info=True) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + +@router.delete( + "/{chore_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete Chore (Any Type)", + tags=["Chores"], +) +async def delete_chore_any_type( + chore_id: int, + db: AsyncSession = Depends(get_transactional_session), + current_user: UserModel = Depends(current_active_user), +): + """Delete a personal or group chore based on permissions.""" + chore = await crud_chore.get_chore_by_id(db, chore_id) + if not chore: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Chore not found") + + if chore.type == ChoreTypeEnum.personal: + if chore.created_by_id != current_user.id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="You can only delete your own personal chores") + allowed = True + target_group_id = None + else: + target_group_id = chore.group_id + try: + await crud_group.check_user_role_in_group(db, group_id=target_group_id, user_id=current_user.id, required_role=UserRoleEnum.owner, action="delete chore in group") + allowed = True + except GroupPermissionError: + # fallback: creator may delete their own group chore + allowed = 
(chore.created_by_id == current_user.id) + except (GroupMembershipError, GroupNotFoundError): + allowed = False + + if not allowed: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Not authorized to delete this chore") + + try: + success = await crud_chore.delete_chore(db=db, chore_id=chore_id, user_id=current_user.id, group_id=target_group_id) + if not success: + raise ChoreNotFoundError(chore_id=chore_id) + except ChoreNotFoundError: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Chore not found") + except PermissionDeniedError as e: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=e.detail) + except Exception as e: + logger.error(f"Error deleting chore {chore_id}: {e}", exc_info=True) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR) + + return Response(status_code=status.HTTP_204_NO_CONTENT) \ No newline at end of file diff --git a/be/app/api/v1/endpoints/financials.py b/be/app/api/v1/endpoints/financials.py index 13d235a..80435bf 100644 --- a/be/app/api/v1/endpoints/financials.py +++ b/be/app/api/v1/endpoints/financials.py @@ -79,9 +79,17 @@ async def create_new_expense( effective_group_id = list_obj.group_id is_group_context = True # Expense is tied to a group via the list elif expense_in.group_id: - raise InvalidOperationError(f"Personal list {list_obj.id} cannot have expense associated with group {expense_in.group_id}.") - # If list is personal, no group check needed yet, handled by payer check below. - + # Allow linking a personal list to a group expense (see TODO issue #6). + # We validate that the current user is a member of the specified group so + # they cannot attach their personal list to an arbitrary group. 
+ effective_group_id = expense_in.group_id + is_group_context = True + await crud_group.check_group_membership( + db, + group_id=effective_group_id, + user_id=current_user.id, + action="create expense from personal list for group" + ) elif effective_group_id: # Only group_id provided for expense is_group_context = True # Ensure user is at least a member to create expense in group context @@ -579,24 +587,22 @@ async def update_settlement_details( # --- Granular Permission Check --- can_modify = False - # 1. User is involved party (payer or payee) - is_party = current_user.id in [settlement_db.paid_by_user_id, settlement_db.paid_to_user_id] - if is_party: + # 1. Original creator may modify their own record + if settlement_db.created_by_user_id == current_user.id: can_modify = True - # 2. OR User is owner of the group the settlement belongs to - # Note: Settlements always have a group_id based on current model - elif settlement_db.group_id: + # 2. Otherwise only a group owner may modify + elif settlement_db.group_id: try: - await crud_group.check_user_role_in_group(db, group_id=settlement_db.group_id, user_id=current_user.id, required_role=UserRoleEnum.owner, action="modify group settlements") + await crud_group.check_user_role_in_group( + db, + group_id=settlement_db.group_id, + user_id=current_user.id, + required_role=UserRoleEnum.owner, + action="modify group settlements created by others" + ) can_modify = True - logger.info(f"Allowing update for settlement {settlement_id} by group owner {current_user.email}") - except GroupMembershipError: + except (GroupMembershipError, GroupPermissionError, GroupNotFoundError): pass - except GroupPermissionError: - pass - except GroupNotFoundError: - logger.error(f"Group {settlement_db.group_id} not found for settlement {settlement_id} during update check.") - pass if not can_modify: raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User cannot modify this settlement (must be involved party or group owner)") @@ 
-634,24 +640,21 @@ async def delete_settlement_record( # --- Granular Permission Check --- can_delete = False - # 1. User is involved party (payer or payee) - is_party = current_user.id in [settlement_db.paid_by_user_id, settlement_db.paid_to_user_id] - if is_party: - can_delete = True - # 2. OR User is owner of the group the settlement belongs to - elif settlement_db.group_id: + # Only a group owner can delete a settlement (regardless of who created it) + if settlement_db.group_id: try: - await crud_group.check_user_role_in_group(db, group_id=settlement_db.group_id, user_id=current_user.id, required_role=UserRoleEnum.owner, action="delete group settlements") + await crud_group.check_user_role_in_group( + db, + group_id=settlement_db.group_id, + user_id=current_user.id, + required_role=UserRoleEnum.owner, + action="delete group settlements" + ) can_delete = True logger.info(f"Allowing delete for settlement {settlement_id} by group owner {current_user.email}") - except GroupMembershipError: + except (GroupMembershipError, GroupPermissionError, GroupNotFoundError): pass - except GroupPermissionError: - pass - except GroupNotFoundError: - logger.error(f"Group {settlement_db.group_id} not found for settlement {settlement_id} during delete check.") - pass - + if not can_delete: raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User cannot delete this settlement (must be involved party or group owner)") diff --git a/be/app/api/v1/endpoints/groups.py b/be/app/api/v1/endpoints/groups.py index 6fcfaa4..c397398 100644 --- a/be/app/api/v1/endpoints/groups.py +++ b/be/app/api/v1/endpoints/groups.py @@ -1,7 +1,7 @@ import logging from typing import List -from fastapi import APIRouter, Depends, HTTPException, status +from fastapi import APIRouter, Depends, HTTPException, status, Query from sqlalchemy.ext.asyncio import AsyncSession from app.database import get_transactional_session, get_session @@ -24,7 +24,8 @@ from app.core.exceptions import ( 
GroupMembershipError, GroupOperationError, GroupValidationError, - InviteCreationError + InviteCreationError, + InvalidOperationError ) logger = logging.getLogger(__name__) @@ -186,6 +187,7 @@ async def get_group_active_invite( async def delete_group( group_id: int, delete_confirmation: GroupDelete, + expected_version: int | None = Query(None, description="Current version for optimistic locking"), db: AsyncSession = Depends(get_transactional_session), current_user: UserModel = Depends(current_active_user), ): @@ -210,7 +212,12 @@ async def delete_group( ) # Delete the group - await crud_group.delete_group(db, group_id) + try: + await crud_group.delete_group(db=db, group_id=group_id, expected_version=expected_version) + except InvalidOperationError as e: + status_code = status.HTTP_409_CONFLICT if "version" in str(e).lower() else status.HTTP_400_BAD_REQUEST + raise HTTPException(status_code=status_code, detail=str(e)) + logger.info(f"Group {group_id} successfully deleted by owner {current_user.email}") return Message(detail="Group successfully deleted") diff --git a/be/app/auth.py b/be/app/auth.py index 08a1910..b09d0d0 100644 --- a/be/app/auth.py +++ b/be/app/auth.py @@ -110,6 +110,34 @@ class UserManager(IntegerIDMixin, BaseUserManager[User, int]): ): print(f"User {user.id} has logged in.") + async def delete(self, user: User, safe: bool = False, request: Optional[Request] = None): + """Soft-delete and anonymize the user instead of removing the DB row. + + This mitigates catastrophic data-loss cascades that can occur when the + user row is physically deleted (see TODO issue #3). The record is kept + for referential integrity, while all personally identifiable + information (PII) is removed and the account is marked inactive. 
+ """ + # Lazily import to avoid circular deps and heavy imports at startup + from datetime import datetime, timezone + + # Anonymise PII – keep a unique but meaningless email address + anonymised_suffix = f"deleted_{user.id}_{int(datetime.now(timezone.utc).timestamp())}" + user.email = f"user_{anonymised_suffix}@example.com" + user.name = None + user.hashed_password = "" + user.is_active = False + user.is_verified = False + user.deleted_at = datetime.now(timezone.utc) + user.is_deleted = True + + # Persist the changes using the underlying user database adapter + await self.user_db.update(user) + + # We purposefully *do not* commit a hard delete, so any FK references + # (expenses, lists, etc.) remain intact. + return None + async def get_user_db(session: AsyncSession = Depends(get_session)): yield SQLAlchemyUserDatabase(session, User) diff --git a/be/app/config.py b/be/app/config.py index fd48199..8bf7294 100644 --- a/be/app/config.py +++ b/be/app/config.py @@ -279,8 +279,10 @@ Now, analyze the provided image and generate the JSON output. APPLE_REDIRECT_URI: str = "https://mitlistbe.mohamad.dev/api/v1/auth/apple/callback" # Session Settings - SESSION_SECRET_KEY: str = "your-session-secret-key" # Change this in production - ACCESS_TOKEN_EXPIRE_MINUTES: int = 480 # 8 hours instead of 30 minutes + # Session secret is required; fail fast if not provided via environment. + SESSION_SECRET_KEY: str | None = None # Must be set via env in production; fallback generated in dev/test + # Shorter token lifetime to reduce risk if a token is leaked. 
+ ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 # Redis Settings REDIS_URL: str = "redis://localhost:6379" @@ -327,6 +329,18 @@ settings = Settings() if settings.DATABASE_URL is None: raise ValueError("DATABASE_URL environment variable must be set.") +# Dynamically generate a session secret in non-production environments to +# maintain backwards-compatibility with local test setups while still failing +# hard in production if a proper secret is missing. +if not settings.SESSION_SECRET_KEY: + if settings.is_production: + raise ValueError("SESSION_SECRET_KEY environment variable must be set in production") + else: + import secrets as _secrets + generated_secret = _secrets.token_urlsafe(32) + object.__setattr__(settings, "SESSION_SECRET_KEY", generated_secret) + logger.warning("SESSION_SECRET_KEY not provided; generated a temporary secret for development use.") + # Enforce secure secret key if not settings.SECRET_KEY: raise ValueError("SECRET_KEY environment variable must be set. Generate a secure key using: openssl rand -hex 32") @@ -337,9 +351,6 @@ if len(settings.SECRET_KEY) < 32: # Production-specific validations if settings.is_production: - if settings.SESSION_SECRET_KEY == "your-session-secret-key": - raise ValueError("SESSION_SECRET_KEY must be changed from default value in production") - if not settings.SENTRY_DSN: logger.warning("SENTRY_DSN not set in production environment. Error tracking will be unavailable.") diff --git a/be/app/core/error_handlers.py b/be/app/core/error_handlers.py new file mode 100644 index 0000000..38686af --- /dev/null +++ b/be/app/core/error_handlers.py @@ -0,0 +1,17 @@ +from fastapi import Request, HTTPException, status +from fastapi.responses import JSONResponse +from sqlalchemy.exc import SQLAlchemyError +import logging + +logger = logging.getLogger(__name__) + +GENERIC_DB_ERROR = "Database error, please try again." +GENERIC_SERVER_ERROR = "Internal server error. Please contact support if the problem persists." 
+ +async def sqlalchemy_exception_handler(request: Request, exc: SQLAlchemyError): + logger.error("SQLAlchemyError", exc_info=exc) + return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content={"detail": GENERIC_DB_ERROR}) + +async def generic_exception_handler(request: Request, exc: Exception): + logger.error("Unhandled exception", exc_info=exc) + return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content={"detail": GENERIC_SERVER_ERROR}) \ No newline at end of file diff --git a/be/app/core/logging_utils.py b/be/app/core/logging_utils.py new file mode 100644 index 0000000..6c92ef1 --- /dev/null +++ b/be/app/core/logging_utils.py @@ -0,0 +1,30 @@ +import logging +import re + +EMAIL_RE = re.compile(r"[\w\.-]+@[\w\.-]+", re.IGNORECASE) + +class PiiRedactionFilter(logging.Filter): + """Filter that redacts email addresses and long numeric IDs from log records.""" + + def filter(self, record: logging.LogRecord) -> bool: + if isinstance(record.msg, dict): + # For structured logs we mutate in-place. 
+ record.msg = self._redact_dict(record.msg) + elif isinstance(record.msg, str): + record.msg = self._redact_text(record.msg) + return True # Always log, but redacted + + def _redact_text(self, text: str) -> str: + text = EMAIL_RE.sub("<redacted-email>", text) + # Redact numeric IDs longer than 6 digits + text = re.sub(r"(?<!\d)\d{7,}(?!\d)", "<redacted-id>", text) + return text + + def _redact_dict(self, data): + redacted = {} + for k, v in data.items(): + if isinstance(v, str): + redacted[k] = self._redact_text(v) + else: + redacted[k] = v + return redacted \ No newline at end of file diff --git a/be/app/core/rate_limiter.py b/be/app/core/rate_limiter.py new file mode 100644 index 0000000..e576007 --- /dev/null +++ b/be/app/core/rate_limiter.py @@ -0,0 +1,39 @@ +import time, logging, asyncio +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.requests import Request +from starlette.responses import Response, JSONResponse +from app.core.redis import redis_pool + +logger = logging.getLogger(__name__) + +RATE_LIMIT_PATHS = { + "/api/v1/auth/jwt/login": (5, 60), # 5 requests per 60 seconds per IP +} + +class RateLimitMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request: Request, call_next): + path = request.url.path + limit_cfg = RATE_LIMIT_PATHS.get(path) + if not limit_cfg: + return await call_next(request) + + max_requests, window = limit_cfg + client_ip = request.client.host if request.client else "unknown" + key = f"rate:{path}:{client_ip}" + try: + current = await redis_pool.get(key) + current_int = int(current) if current else 0 + if current_int >= max_requests: + logger.warning(f"Rate limit exceeded for {client_ip} on {path}") + return JSONResponse(status_code=429, content={"detail": "Too Many Requests"}) + # increment + pipe = redis_pool.pipeline() + pipe.incr(key, 1) + pipe.expire(key, window) + await pipe.execute() + except Exception as e: + logger.error(f"Rate limiting error: {e}") + # Fail-open if redis unavailable + pass + + return await call_next(request) \ No 
newline at end of file diff --git a/be/app/core/scheduler.py b/be/app/core/scheduler.py index 0844979..1d0c7b2 100644 --- a/be/app/core/scheduler.py +++ b/be/app/core/scheduler.py @@ -1,5 +1,5 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler -from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore +from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.executors.pool import ThreadPoolExecutor from apscheduler.triggers.cron import CronTrigger from app.config import settings @@ -9,14 +9,13 @@ import logging logger = logging.getLogger(__name__) -sync_db_url = settings.DATABASE_URL.replace('postgresql+asyncpg://', 'postgresql://') - jobstores = { - 'default': SQLAlchemyJobStore(url=sync_db_url) + 'default': MemoryJobStore() } +# Run scheduled jobs on a separate small thread pool to keep event loop free executors = { - 'default': ThreadPoolExecutor(20) + 'default': ThreadPoolExecutor(5) } job_defaults = { diff --git a/be/app/crud/group.py b/be/app/crud/group.py index f06d38b..08848c3 100644 --- a/be/app/crud/group.py +++ b/be/app/crud/group.py @@ -19,7 +19,8 @@ from app.core.exceptions import ( DatabaseTransactionError, GroupMembershipError, GroupPermissionError, - PermissionDeniedError + PermissionDeniedError, + InvalidOperationError ) from app.core.cache import cache @@ -146,6 +147,13 @@ async def add_user_to_group(db: AsyncSession, group_id: int, user_id: int, role: db.add(db_user_group) await db.flush() # Assigns ID to db_user_group + # Optimistic locking: bump group version atomically + await db.execute( + update(GroupModel) + .where(GroupModel.id == group_id) + .values(version=GroupModel.version + 1) + ) + # Eagerly load the 'user' and 'group' relationships for the response stmt = ( select(UserGroupModel) @@ -181,7 +189,16 @@ async def remove_user_from_group(db: AsyncSession, group_id: int, user_id: int) .where(UserGroupModel.group_id == group_id, UserGroupModel.user_id == user_id) .returning(UserGroupModel.id) ) - return 
result.scalar_one_or_none() is not None + deleted = result.scalar_one_or_none() is not None + + if deleted: + await db.execute( + update(GroupModel) + .where(GroupModel.id == group_id) + .values(version=GroupModel.version + 1) + ) + + return deleted except OperationalError as e: logger.error(f"Database connection error while removing user from group: {str(e)}", exc_info=True) raise DatabaseConnectionError(f"Database connection error: {str(e)}") @@ -271,7 +288,7 @@ async def check_user_role_in_group( # If role is sufficient, return None return None -async def delete_group(db: AsyncSession, group_id: int) -> None: +async def delete_group(db: AsyncSession, group_id: int, *, expected_version: int | None = None) -> None: """ Deletes a group and all its associated data (members, invites, lists, etc.). The cascade delete in the models will handle the deletion of related records. @@ -286,6 +303,12 @@ async def delete_group(db: AsyncSession, group_id: int) -> None: if not group: raise GroupNotFoundError(group_id) + # Optimistic locking – ensure caller had latest version + if expected_version is not None and group.version != expected_version: + raise InvalidOperationError( + f"Version mismatch for group {group_id}. Current version is {group.version}, expected {expected_version}." 
+ ) + # Delete the group - cascading delete will handle related records await db.delete(group) await db.flush() diff --git a/be/app/jobs/recurring_expenses.py b/be/app/jobs/recurring_expenses.py index 7063c32..0e6c398 100644 --- a/be/app/jobs/recurring_expenses.py +++ b/be/app/jobs/recurring_expenses.py @@ -41,8 +41,11 @@ async def generate_recurring_expenses(db: AsyncSession) -> None: for expense in recurring_expenses: try: await _generate_next_occurrence(db, expense) + # Persist changes for this expense before moving to the next one + await db.commit() except Exception as e: logger.error(f"Error generating next occurrence for expense {expense.id}: {str(e)}", exc_info=True) + await db.rollback() continue except Exception as e: diff --git a/be/app/main.py b/be/app/main.py index d1fb592..60244cc 100644 --- a/be/app/main.py +++ b/be/app/main.py @@ -13,6 +13,9 @@ from app.auth import fastapi_users, auth_backend from app.schemas.user import UserPublic, UserCreate, UserUpdate from app.core.scheduler import init_scheduler, shutdown_scheduler from app.core.middleware import RequestContextMiddleware +from app.core.logging_utils import PiiRedactionFilter +from app.core.error_handlers import sqlalchemy_exception_handler, generic_exception_handler +from app.core.rate_limiter import RateLimitMiddleware if settings.SENTRY_DSN: sentry_sdk.init( @@ -29,8 +32,12 @@ logging.basicConfig( level=getattr(logging, settings.LOG_LEVEL), format=settings.LOG_FORMAT ) -logger = logging.getLogger(__name__) +# Attach PII redaction filter to root logger +root_logger = logging.getLogger() +root_logger.addFilter(PiiRedactionFilter()) + +logger = logging.getLogger(__name__) api_metadata = { **API_METADATA, @@ -51,41 +58,35 @@ app.add_middleware( # Structured logging & request tracing app.add_middleware(RequestContextMiddleware) +app.add_middleware(RateLimitMiddleware) app.add_middleware( CORSMiddleware, - allow_origins=settings.cors_origins_list, - allow_credentials=True, + 
allow_origins=(settings.cors_origins_list if not settings.is_production else [settings.FRONTEND_URL]), + # Credentials (cookies) are not required because we use JWTs in Authorization headers. + allow_credentials=False, allow_methods=["*"], allow_headers=["*"], expose_headers=["*"] ) +# Register exception handlers BEFORE adding middleware/router +app.add_exception_handler(Exception, generic_exception_handler) +from sqlalchemy.exc import SQLAlchemyError +app.add_exception_handler(SQLAlchemyError, sqlalchemy_exception_handler) + app.include_router(api_router, prefix=settings.API_PREFIX) @app.get("/health", tags=["Health"]) async def health_check(): - """ - Health check endpoint for load balancers and monitoring. - """ - return { - "status": settings.HEALTH_STATUS_OK, - "environment": settings.ENVIRONMENT, - "version": settings.API_VERSION - } + """Minimal health check endpoint that avoids leaking build metadata.""" + return {"status": settings.HEALTH_STATUS_OK} @app.get("/", tags=["Root"]) async def read_root(): - """ - Provides a simple welcome message at the root path. - Useful for basic reachability checks. - """ + """Public root endpoint with minimal information.""" logger.info("Root endpoint '/' accessed.") - return { - "message": settings.ROOT_MESSAGE, - "environment": settings.ENVIRONMENT, - "version": settings.API_VERSION - } + return {"message": settings.ROOT_MESSAGE} async def run_migrations(): """Run database migrations.""" @@ -118,4 +119,11 @@ async def shutdown_event(): """Cleanup services on shutdown.""" logger.info("Application shutdown: Disconnecting from database...") shutdown_scheduler() + # Close Redis connection pool to avoid leaking file descriptors. 
+ try: + from app.core.redis import redis_pool + await redis_pool.aclose() + logger.info("Redis pool closed.") + except Exception as e: + logger.warning(f"Error closing Redis pool: {e}") logger.info("Application shutdown complete.") \ No newline at end of file diff --git a/env.production.template b/env.production.template index b1948a6..b65ad7d 100644 --- a/env.production.template +++ b/env.production.template @@ -16,6 +16,10 @@ SESSION_SECRET_KEY=your_session_secret_key_here_minimum_32_characters_long GEMINI_API_KEY=your_gemini_api_key_here # Redis Configuration +# If you are running the Redis container from docker-compose, the connection URL is usually: +# redis://:@redis:6379/0 +# Otherwise adjust host/port/password as required. +REDIS_URL=redis://:your_redis_password_here@redis:6379/0 REDIS_PASSWORD=your_redis_password_here # Sentry Configuration (Optional but recommended) @@ -43,4 +47,13 @@ APPLE_REDIRECT_URI=https://yourdomain.com/auth/apple/callback # Production Settings ENVIRONMENT=production -LOG_LEVEL=INFO \ No newline at end of file + +# Logging Configuration +# Valid LOG_LEVEL values: DEBUG, INFO, WARNING, ERROR, CRITICAL +LOG_LEVEL=INFO +# LOG_FORMAT defaults to a timestamped pattern – override only if you have special needs. +# LOG_FORMAT="%(asctime)s - %(name)s - %(levelname)s - %(message)s" + +# Auth / Security +# By default JWT access tokens live for 60 minutes; you can shorten or extend here (in minutes). 
+ACCESS_TOKEN_EXPIRE_MINUTES=60 \ No newline at end of file diff --git a/fe/.env b/fe/.env index e69de29..5934e2e 100644 --- a/fe/.env +++ b/fe/.env @@ -0,0 +1 @@ +VITE_API_URL=http://localhost:8000 diff --git a/fe/src/config/api-config.ts b/fe/src/config/api-config.ts index ddaa0ff..83506b8 100644 --- a/fe/src/config/api-config.ts +++ b/fe/src/config/api-config.ts @@ -2,7 +2,7 @@ export const API_VERSION = 'v1' // API Base URL -export const API_BASE_URL = (window as any).ENV?.VITE_API_URL || 'https://mitlistbe.mohamad.dev' +export const API_BASE_URL = (window as any).ENV?.VITE_API_URL // API Endpoints export const API_ENDPOINTS = {