3 changes: 3 additions & 0 deletions .idea/.gitignore


8 changes: 8 additions & 0 deletions .idea/SurfSense.iml


6 changes: 6 additions & 0 deletions .idea/inspectionProfiles/profiles_settings.xml


8 changes: 8 additions & 0 deletions .idea/modules.xml


6 changes: 6 additions & 0 deletions .idea/vcs.xml


100 changes: 100 additions & 0 deletions surfsense_backend/alembic/versions/23_add_connector_schedules_table.py
@@ -0,0 +1,100 @@
"""Add connector schedules table
Revision ID: 23
Revises: 22
"""

from collections.abc import Sequence

from sqlalchemy import inspect

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "23"
down_revision: str | None = "22"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
"""Upgrade schema - add ScheduleType enum and connector_schedules table."""

# Create ScheduleType enum if it doesn't exist
op.execute(
"""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'scheduletype') THEN
CREATE TYPE scheduletype AS ENUM ('HOURLY', 'DAILY', 'WEEKLY', 'CUSTOM');
END IF;
END$$;
"""
)

# Create connector_schedules table if it doesn't exist
op.execute(
"""
CREATE TABLE IF NOT EXISTS connector_schedules (
id SERIAL PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
connector_id INTEGER NOT NULL REFERENCES search_source_connectors(id) ON DELETE CASCADE,
search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE,
schedule_type scheduletype NOT NULL,
cron_expression VARCHAR(100),
daily_time TIME,
weekly_day SMALLINT,
weekly_time TIME,
hourly_minute SMALLINT,
is_active BOOLEAN NOT NULL DEFAULT TRUE,
last_run_at TIMESTAMPTZ,
next_run_at TIMESTAMPTZ,
CONSTRAINT uq_connector_search_space UNIQUE (connector_id, search_space_id)
);
Comment on lines +38 to +53
⚠️ Potential issue | πŸ”΄ Critical

Add missing updated_at column to match ORM TimestampMixin

The ConnectorSchedule model inherits TimestampMixin (created_at, updated_at), but the table DDL creates only created_at and omits updated_at, which will cause runtime SQL errors (column not found) when the ORM selects or updates that column.

Apply this diff to add updated_at:

         CREATE TABLE IF NOT EXISTS connector_schedules (
             id SERIAL PRIMARY KEY,
             created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
             connector_id INTEGER NOT NULL REFERENCES search_source_connectors(id) ON DELETE CASCADE,
             search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE,
             schedule_type scheduletype NOT NULL,

Optional: the explicit index on id is redundant (the primary key is already indexed); consider dropping the creation of ix_connector_schedules_id.

πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
CREATE TABLE IF NOT EXISTS connector_schedules (
id SERIAL PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
connector_id INTEGER NOT NULL REFERENCES search_source_connectors(id) ON DELETE CASCADE,
search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE,
schedule_type scheduletype NOT NULL,
cron_expression VARCHAR(100),
daily_time TIME,
weekly_day SMALLINT,
weekly_time TIME,
hourly_minute SMALLINT,
is_active BOOLEAN NOT NULL DEFAULT TRUE,
last_run_at TIMESTAMPTZ,
next_run_at TIMESTAMPTZ,
CONSTRAINT uq_connector_search_space UNIQUE (connector_id, search_space_id)
);
CREATE TABLE IF NOT EXISTS connector_schedules (
id SERIAL PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
connector_id INTEGER NOT NULL REFERENCES search_source_connectors(id) ON DELETE CASCADE,
search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE,
schedule_type scheduletype NOT NULL,
cron_expression VARCHAR(100),
daily_time TIME,
weekly_day SMALLINT,
weekly_time TIME,
hourly_minute SMALLINT,
is_active BOOLEAN NOT NULL DEFAULT TRUE,
last_run_at TIMESTAMPTZ,
next_run_at TIMESTAMPTZ,
CONSTRAINT uq_connector_search_space UNIQUE (connector_id, search_space_id)
);
πŸ€– Prompt for AI Agents
In surfsense_backend/alembic/versions/23_add_connector_schedules_table.py around
lines 38 to 53, the CREATE TABLE for connector_schedules defines created_at but
omits updated_at which the ORM's TimestampMixin expects; add an updated_at
TIMESTAMPTZ NOT NULL DEFAULT NOW() column (positioned with created_at) so
selects/updates succeed, and optionally remove the explicit
ix_connector_schedules_id index since the PRIMARY KEY already creates an index.

"""
)

# Get existing indexes
conn = op.get_bind()
inspector = inspect(conn)
existing_indexes = [
idx["name"] for idx in inspector.get_indexes("connector_schedules")
]

# Create indexes only if they don't already exist
if "ix_connector_schedules_id" not in existing_indexes:
op.create_index("ix_connector_schedules_id", "connector_schedules", ["id"])
if "ix_connector_schedules_created_at" not in existing_indexes:
op.create_index(
"ix_connector_schedules_created_at", "connector_schedules", ["created_at"]
)
if "ix_connector_schedules_connector_id" not in existing_indexes:
op.create_index(
"ix_connector_schedules_connector_id", "connector_schedules", ["connector_id"]
)
if "ix_connector_schedules_is_active" not in existing_indexes:
op.create_index(
"ix_connector_schedules_is_active", "connector_schedules", ["is_active"]
)
if "ix_connector_schedules_next_run_at" not in existing_indexes:
op.create_index(
"ix_connector_schedules_next_run_at", "connector_schedules", ["next_run_at"]
)


def downgrade() -> None:
"""Downgrade schema - remove connector_schedules table and enum."""

# Drop indexes
op.drop_index("ix_connector_schedules_next_run_at", table_name="connector_schedules")
op.drop_index("ix_connector_schedules_is_active", table_name="connector_schedules")
op.drop_index("ix_connector_schedules_connector_id", table_name="connector_schedules")
op.drop_index("ix_connector_schedules_created_at", table_name="connector_schedules")
op.drop_index("ix_connector_schedules_id", table_name="connector_schedules")

# Drop table
op.drop_table("connector_schedules")

# Drop enum
op.execute("DROP TYPE IF EXISTS scheduletype")

38 changes: 36 additions & 2 deletions surfsense_backend/app/app.py
@@ -1,4 +1,6 @@
from contextlib import asynccontextmanager
import asyncio
import logging
from contextlib import asynccontextmanager, suppress

from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
@@ -7,15 +9,45 @@
from app.config import config
from app.db import User, create_db_and_tables, get_async_session
from app.routes import router as crud_router
from app.routes.connector_schedules_routes import router as connector_schedules_router
from app.routes.scheduler_routes import router as scheduler_router
from app.schemas import UserCreate, UserRead, UserUpdate
from app.services.connector_scheduler_service import start_scheduler, stop_scheduler
from app.users import SECRET, auth_backend, current_active_user, fastapi_users

logger = logging.getLogger(__name__)


@asynccontextmanager
async def lifespan(app: FastAPI):
# Not needed if you setup a migration system like Alembic
"""Application lifespan manager with scheduler integration."""
# Startup
logger.info("Starting SurfSense application...")

# Create database tables
await create_db_and_tables()
logger.info("Database tables created/verified")

# Start the connector scheduler service
scheduler_task = asyncio.create_task(start_scheduler())
logger.info("Connector scheduler service started")

Comment on lines +31 to +34
⚠️ Potential issue | 🟠 Major

Avoid spawning an unobserved startup task; await and handle errors.

create_task(start_scheduler()) can fail silently; the β€œstarted” log is emitted even if startup fails. Await it and log/propagate errors.

Apply this diff:

-    # Start the connector scheduler service
-    scheduler_task = asyncio.create_task(start_scheduler())
-    logger.info("Connector scheduler service started")
+    # Start the connector scheduler service
+    try:
+        await start_scheduler()
+        logger.info("Connector scheduler service started")
+    except Exception:
+        logger.exception("Failed to start connector scheduler")
+        raise

Committable suggestion skipped: line range outside the PR's diff.

πŸ€– Prompt for AI Agents
In surfsense_backend/app/app.py around lines 31 to 34, the code spawns
start_scheduler() with asyncio.create_task and immediately logs "Connector
scheduler service started", which can hide startup failures; change this to
await start_scheduler() (or await the created task) inside a try/except so
startup errors are caught, log a success message only after await returns, and
on exception log the error and re-raise or handle/propagate it appropriately so
failures are not silent.

yield

# Shutdown
logger.info("Shutting down SurfSense application...")

# Stop the scheduler service
await stop_scheduler()
logger.info("Connector scheduler service stopped")

# Cancel the scheduler task
if not scheduler_task.done():
scheduler_task.cancel()
with suppress(asyncio.CancelledError):
await scheduler_task

logger.info("Application shutdown complete")


app = FastAPI(lifespan=lifespan)
@@ -65,6 +97,8 @@ async def lifespan(app: FastAPI):
)

app.include_router(crud_router, prefix="/api/v1", tags=["crud"])
app.include_router(connector_schedules_router, prefix="/api/v1", tags=["connector-schedules"])
app.include_router(scheduler_router, prefix="/api/v1", tags=["scheduler"])


@app.get("/verify-token")
18 changes: 17 additions & 1 deletion surfsense_backend/app/connectors/clickup_connector.py
@@ -5,6 +5,7 @@
Allows fetching tasks from workspaces and lists.
"""

from datetime import datetime
from typing import Any

import requests
@@ -168,13 +169,28 @@ def get_tasks_in_date_range(
Tuple containing (tasks list, error message or None)
"""
try:
# TODO : Include date range in api request
# Convert date strings to Unix timestamps (milliseconds)
start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
end_datetime = datetime.strptime(end_date, "%Y-%m-%d")

# Set time to start and end of day for complete coverage
start_datetime = start_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
end_datetime = end_datetime.replace(hour=23, minute=59, second=59, microsecond=999999)

start_timestamp = int(start_datetime.timestamp() * 1000)
end_timestamp = int(end_datetime.timestamp() * 1000)
Comment on lines +172 to +181
⚠️ Potential issue | 🟠 Major

Use UTC timezone explicitly to avoid inconsistent filtering.

The code creates naive datetime objects (without timezone info) and calls .timestamp(), which uses the system's local timezone. This can lead to incorrect date filtering if the server timezone differs from the expected timezone or when handling tasks across timezones.

Apply this diff to use UTC explicitly:

-            # Convert date strings to Unix timestamps (milliseconds)
-            start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
-            end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
-            
-            # Set time to start and end of day for complete coverage
-            start_datetime = start_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
-            end_datetime = end_datetime.replace(hour=23, minute=59, second=59, microsecond=999999)
-            
-            start_timestamp = int(start_datetime.timestamp() * 1000)
-            end_timestamp = int(end_datetime.timestamp() * 1000)
+            # Convert date strings to Unix timestamps (milliseconds) using UTC
+            from datetime import timezone
+            
+            start_datetime = datetime.strptime(start_date, "%Y-%m-%d").replace(
+                hour=0, minute=0, second=0, microsecond=0, tzinfo=timezone.utc
+            )
+            end_datetime = datetime.strptime(end_date, "%Y-%m-%d").replace(
+                hour=23, minute=59, second=59, microsecond=999999, tzinfo=timezone.utc
+            )
+            
+            start_timestamp = int(start_datetime.timestamp() * 1000)
+            end_timestamp = int(end_datetime.timestamp() * 1000)
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Convert date strings to Unix timestamps (milliseconds)
start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
# Set time to start and end of day for complete coverage
start_datetime = start_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
end_datetime = end_datetime.replace(hour=23, minute=59, second=59, microsecond=999999)
start_timestamp = int(start_datetime.timestamp() * 1000)
end_timestamp = int(end_datetime.timestamp() * 1000)
# Convert date strings to Unix timestamps (milliseconds) using UTC
from datetime import timezone
start_datetime = datetime.strptime(start_date, "%Y-%m-%d").replace(
hour=0, minute=0, second=0, microsecond=0, tzinfo=timezone.utc
)
end_datetime = datetime.strptime(end_date, "%Y-%m-%d").replace(
hour=23, minute=59, second=59, microsecond=999999, tzinfo=timezone.utc
)
start_timestamp = int(start_datetime.timestamp() * 1000)
end_timestamp = int(end_datetime.timestamp() * 1000)
πŸ€– Prompt for AI Agents
In surfsense_backend/app/connectors/clickup_connector.py around lines 172 to
181, the code builds naive datetimes and calls .timestamp(), which uses the
system local timezone; make the datetimes timezone-aware in UTC before
converting to timestamps: parse the dates as date objects or datetimes as
currently done, set their time to start/end of day, attach UTC tzinfo (e.g.,
replace or combine with datetime.timezone.utc) so they are aware, then call
.timestamp() to produce consistent Unix millisecond timestamps in UTC.


params = {
"page": 0,
"order_by": "created",
"reverse": "true",
"subtasks": "true",
"include_closed": str(include_closed).lower(),
# Date filtering - filter by both created and updated dates
"date_created_gt": start_timestamp,
"date_created_lt": end_timestamp,
"date_updated_gt": start_timestamp,
"date_updated_lt": end_timestamp,
Comment on lines +189 to +193
⚠️ Potential issue | πŸ”΄ Critical

🧩 Analysis chain

Verify that ClickUp API uses OR logic for multiple date filters.

The implementation sends all four date parameters (date_created_gt/lt AND date_updated_gt/lt) simultaneously. Depending on how the ClickUp API interprets these parameters, this could either:

  1. OR logic (desired): Return tasks created OR updated within the range, enabling proper incremental sync.
  2. AND logic (problematic): Return only tasks where BOTH created AND updated dates fall within the range, missing tasks created before the range but updated within it (or vice versa).

The PR objectives mention Jira uses explicit OR logic. Verify the ClickUp API behavior to ensure incremental synchronization works correctly.

Run the following script to check the ClickUp API documentation and behavior:

Additionally, search the web for ClickUp API documentation on date filtering:


🏁 Script executed:

#!/bin/bash
# Verify ClickUp API date filtering behavior and search for existing integration examples

# Search codebase for any documentation or comments about ClickUp date filter behavior
rg -i "clickup.*date.*filter" -A 5 -B 5

# Search for any tests that might clarify the expected behavior
rg -i "date_created.*date_updated" -A 5 -B 5

Length of output: 0


🌐 Web query:

ClickUp API v2 date filtering parameters date_created_gt date_updated_gt behavior AND OR logic

πŸ’‘ Result:

Short answer

  • date_created_gt and date_updated_gt are Unix‑epoch millisecond filters (strict "greater than") that compare against a task's dateCreated / dateUpdated fields. [1][2][3]
  • When you pass multiple filters (dates, status, assignees, etc.) the API applies them conjunctively (AND) β€” a task must satisfy all supplied filters to be returned. [1][3]

Details / notes

  • Use milliseconds since 1970-01-01 UTC (or stringified millisecond value). The API returns timestamps in UTC. [2]
  • Operators documented: gt (greater than), lt (less than), gte/lte for inclusive comparisons where available. Use date_created_lt / date_updated_lt for upper bounds. [1][3]
  • If you need OR logic (e.g., created after A OR updated after B) you must perform multiple requests and merge results client‑side (no server-side OR across different date fields is documented). [1][3]

Sources

  1. ClickUp developer β€” Filter Views (dateCreated/dateUpdated, operators). [1]
  2. ClickUp developer β€” Date formatting / timestamps (milliseconds, UTC). [2]
  3. ClickUp API task endpoints and community docs referencing date_created_gt / date_updated_gt. [3]

Handle ClickUp date filters with OR logic by splitting requests
ClickUp applies multiple date filters conjunctively (AND), so passing both the created and updated bounds returns only tasks whose created AND updated dates both fall within the range, missing tasks where only one of them qualifies. Break the call into two queries (one filtering on the created range, one on the updated range), then merge and dedupe the results client-side.

πŸ€– Prompt for AI Agents
In surfsense_backend/app/connectors/clickup_connector.py around lines 189 to
193, the current request includes both created and updated date bounds which
ClickUp applies as AND; instead, perform two queries: one with only
date_created_gt/lt set to start_timestamp/end_timestamp and another with only
date_updated_gt/lt set to the same timestamps, then combine the two response
lists and deduplicate by task id (e.g., use a dict or set of ids) before
returning so tasks that match either created OR updated ranges are preserved.
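
For illustration, a minimal sketch of the split-request approach; fetch_tasks is a placeholder for the connector's existing paginated request helper, not an actual method name:

def get_tasks_created_or_updated(fetch_tasks, start_ts: int, end_ts: int) -> list[dict]:
    """Fetch tasks created OR updated within [start_ts, end_ts], deduped by id."""
    created_params = {"date_created_gt": start_ts, "date_created_lt": end_ts}
    updated_params = {"date_updated_gt": start_ts, "date_updated_lt": end_ts}

    merged: dict[str, dict] = {}
    for params in (created_params, updated_params):
        for task in fetch_tasks(params):  # one request (or paginated loop) per filter set
            merged[task["id"]] = task  # overwriting duplicates keeps a single copy per task id
    return list(merged.values())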

}

all_tasks = []
10 changes: 5 additions & 5 deletions surfsense_backend/app/connectors/jira_connector.py
@@ -222,14 +222,14 @@ def get_issues_by_date_range(
# Build JQL query for date range
# Query issues that were either created OR updated within the date range
date_filter = (
f"(createdDate >= '{start_date}' AND createdDate <= '{end_date}')"
f"(createdDate >= '{start_date}' AND createdDate <= '{end_date}') "
f"OR (updatedDate >= '{start_date}' AND updatedDate <= '{end_date}')"
)
# TODO : This JQL needs some improvement to work as expected

_jql = f"{date_filter}"
_jql = f"{date_filter} ORDER BY created DESC"
if project_key:
_jql = (
f'project = "{project_key}" AND {date_filter} ORDER BY created DESC'
f'project = "{project_key}" AND ({date_filter}) ORDER BY created DESC'
)

# Define fields to retrieve
@@ -250,7 +250,7 @@
fields.append("comment")

params = {
# "jql": "", TODO : Add a JQL query to filter from a date range
"jql": _jql,
"fields": ",".join(fields),
"maxResults": 100,
"startAt": 0,
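For illustration, the JQL this change builds for a project-scoped query (project key and dates are placeholder values). The added parentheses around the date filter matter because JQL evaluates AND before OR; without them the project clause would bind only to the createdDate half of the expression:

project_key = "SURF"  # placeholder
start_date, end_date = "2024-01-01", "2024-01-31"  # placeholders
date_filter = (
    f"(createdDate >= '{start_date}' AND createdDate <= '{end_date}') "
    f"OR (updatedDate >= '{start_date}' AND updatedDate <= '{end_date}')"
)
jql = f'project = "{project_key}" AND ({date_filter}) ORDER BY created DESC'
# -> project = "SURF" AND ((createdDate >= '2024-01-01' AND createdDate <= '2024-01-31')
#    OR (updatedDate >= '2024-01-01' AND updatedDate <= '2024-01-31')) ORDER BY created DESC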
65 changes: 51 additions & 14 deletions surfsense_backend/app/db.py
@@ -16,6 +16,7 @@
Integer,
String,
Text,
Time,
UniqueConstraint,
text,
)
@@ -129,6 +130,13 @@ class LogStatus(str, Enum):
FAILED = "FAILED"


class ScheduleType(str, Enum):
HOURLY = "HOURLY"
DAILY = "DAILY"
WEEKLY = "WEEKLY"
CUSTOM = "CUSTOM"


class Base(DeclarativeBase):
pass

@@ -245,21 +253,10 @@ class SearchSpace(BaseModel, TimestampMixin):
order_by="Log.id",
cascade="all, delete-orphan",
)
search_source_connectors = relationship(
"SearchSourceConnector",
back_populates="search_space",
order_by="SearchSourceConnector.id",
cascade="all, delete-orphan",
)
llm_configs = relationship(
"LLMConfig",
back_populates="search_space",
order_by="LLMConfig.id",
cascade="all, delete-orphan",
)
user_preferences = relationship(
"UserSearchSpacePreference",
connector_schedules = relationship(
"ConnectorSchedule",
back_populates="search_space",
order_by="ConnectorSchedule.id",
cascade="all, delete-orphan",
)
Comment on lines +256 to 261
⚠️ Potential issue | πŸ”΄ Critical

Fix broken SQLAlchemy relationships (missing back_populates)

Several relationships reference back_populates that don’t exist, causing mapper configuration errors at startup.

Apply these additions:

@@ class SearchSpace(BaseModel, TimestampMixin):
     logs = relationship(
         "Log",
         back_populates="search_space",
         order_by="Log.id",
         cascade="all, delete-orphan",
     )
+    # Add missing relationships for back_populates used elsewhere
+    search_source_connectors = relationship(
+        "SearchSourceConnector",
+        back_populates="search_space",
+        order_by="SearchSourceConnector.id",
+        cascade="all, delete-orphan",
+    )
     connector_schedules = relationship(
         "ConnectorSchedule",
         back_populates="search_space",
         order_by="ConnectorSchedule.id",
         cascade="all, delete-orphan",
     )
+    llm_configs = relationship(
+        "LLMConfig",
+        back_populates="search_space",
+        order_by="LLMConfig.id",
+        cascade="all, delete-orphan",
+    )
+    user_preferences = relationship(
+        "UserSearchSpacePreference",
+        back_populates="search_space",
+        order_by="UserSearchSpacePreference.id",
+        cascade="all, delete-orphan",
+    )
@@ class SearchSourceConnector(BaseModel, TimestampMixin):
-    search_space = relationship(
-        "SearchSpace", back_populates="search_source_connectors"
-    )
+    search_space = relationship("SearchSpace", back_populates="search_source_connectors")
@@ class LLMConfig(BaseModel, TimestampMixin):
-    search_space = relationship("SearchSpace", back_populates="llm_configs")
+    search_space = relationship("SearchSpace", back_populates="llm_configs")
@@ class UserSearchSpacePreference(BaseModel, TimestampMixin):
-    search_space = relationship("SearchSpace", back_populates="user_preferences")
+    search_space = relationship("SearchSpace", back_populates="user_preferences")
@@ class User(SQLAlchemyBaseUserTableUUID, Base):  # GOOGLE path
         search_spaces = relationship("SearchSpace", back_populates="user")
         search_space_preferences = relationship(
             "UserSearchSpacePreference",
             back_populates="user",
             cascade="all, delete-orphan",
         )
+        search_source_connectors = relationship(
+            "SearchSourceConnector",
+            back_populates="user",
+        )
@@ else path: class User(SQLAlchemyBaseUserTableUUID, Base):
         search_spaces = relationship("SearchSpace", back_populates="user")
         search_space_preferences = relationship(
             "UserSearchSpacePreference",
             back_populates="user",
             cascade="all, delete-orphan",
         )
+        search_source_connectors = relationship(
+            "SearchSourceConnector",
+            back_populates="user",
+        )

Also applies to: 284-291, 333-356, 391-392, 424-444

πŸ€– Prompt for AI Agents
In surfsense_backend/app/db.py around lines 256-261 (and similarly for ranges
284-291, 333-356, 391-392, 424-444), several relationship() calls reference
back_populates names that are missing on the related models; add the reciprocal
relationship attributes to those related ORM classes using the exact
back_populates string used here, matching relationship parameters (e.g., class
name, back_populates value, order_by/cascade where appropriate) so each
relationship has a corresponding partner β€” ensure names and casing match, set
backref behavior consistently (use relationship on both sides with
back_populates, not backref), and include any order_by or cascade clauses on the
appropriate side to mirror the original intended behavior.


@@ -291,6 +288,46 @@ class SearchSourceConnector(BaseModel, TimestampMixin):
user_id = Column(
UUID(as_uuid=True), ForeignKey("user.id", ondelete="CASCADE"), nullable=False
)
user = relationship("User", back_populates="search_source_connectors")
schedules = relationship(
"ConnectorSchedule",
back_populates="connector",
cascade="all, delete-orphan",
)


class ConnectorSchedule(BaseModel, TimestampMixin):
__tablename__ = "connector_schedules"
__table_args__ = (
UniqueConstraint(
"connector_id", "search_space_id", name="uq_connector_search_space"
),
)

connector_id = Column(
Integer,
ForeignKey("search_source_connectors.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
search_space_id = Column(
Integer,
ForeignKey("searchspaces.id", ondelete="CASCADE"),
nullable=False,
)
schedule_type = Column(SQLAlchemyEnum(ScheduleType), nullable=False)
cron_expression = Column(String(100), nullable=True)
# Optional schedule config fields (persist user selections)
daily_time = Column(Time(timezone=False), nullable=True)
weekly_day = Column(Integer, nullable=True) # 0=Mon .. 6=Sun
weekly_time = Column(Time(timezone=False), nullable=True)
hourly_minute = Column(Integer, nullable=True) # 0..59
is_active = Column(Boolean, nullable=False, default=True, index=True)
last_run_at = Column(TIMESTAMP(timezone=True), nullable=True)
next_run_at = Column(TIMESTAMP(timezone=True), nullable=True, index=True)

connector = relationship("SearchSourceConnector", back_populates="schedules")
search_space = relationship("SearchSpace", back_populates="connector_schedules")


class LLMConfig(BaseModel, TimestampMixin):