bugs(fix): Fixed broken date range filtering #407
Changes from all commits: a51fcce, b870ddb, 6fa1a00, 97f73ae, fc4f677, 7391a34, 62ffec9, c044d42
File: Alembic migration "Add connector schedules table" (new file, 100 lines)

```python
"""Add connector schedules table

Revision ID: 23
Revises: 22
"""

from collections.abc import Sequence

from sqlalchemy import inspect

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "23"
down_revision: str | None = "22"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Upgrade schema - add ScheduleType enum and connector_schedules table."""
    # Create ScheduleType enum if it doesn't exist
    op.execute(
        """
        DO $$
        BEGIN
            IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'scheduletype') THEN
                CREATE TYPE scheduletype AS ENUM ('HOURLY', 'DAILY', 'WEEKLY', 'CUSTOM');
            END IF;
        END$$;
        """
    )

    # Create connector_schedules table if it doesn't exist
    op.execute(
        """
        CREATE TABLE IF NOT EXISTS connector_schedules (
            id SERIAL PRIMARY KEY,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            connector_id INTEGER NOT NULL REFERENCES search_source_connectors(id) ON DELETE CASCADE,
            search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE,
            schedule_type scheduletype NOT NULL,
            cron_expression VARCHAR(100),
            daily_time TIME,
            weekly_day SMALLINT,
            weekly_time TIME,
            hourly_minute SMALLINT,
            is_active BOOLEAN NOT NULL DEFAULT TRUE,
            last_run_at TIMESTAMPTZ,
            next_run_at TIMESTAMPTZ,
            CONSTRAINT uq_connector_search_space UNIQUE (connector_id, search_space_id)
        );
        """
    )

    # Get existing indexes
    conn = op.get_bind()
    inspector = inspect(conn)
    existing_indexes = [
        idx["name"] for idx in inspector.get_indexes("connector_schedules")
    ]

    # Create indexes only if they don't already exist
    if "ix_connector_schedules_id" not in existing_indexes:
        op.create_index("ix_connector_schedules_id", "connector_schedules", ["id"])
    if "ix_connector_schedules_created_at" not in existing_indexes:
        op.create_index(
            "ix_connector_schedules_created_at", "connector_schedules", ["created_at"]
        )
    if "ix_connector_schedules_connector_id" not in existing_indexes:
        op.create_index(
            "ix_connector_schedules_connector_id", "connector_schedules", ["connector_id"]
        )
    if "ix_connector_schedules_is_active" not in existing_indexes:
        op.create_index(
            "ix_connector_schedules_is_active", "connector_schedules", ["is_active"]
        )
    if "ix_connector_schedules_next_run_at" not in existing_indexes:
        op.create_index(
            "ix_connector_schedules_next_run_at", "connector_schedules", ["next_run_at"]
        )


def downgrade() -> None:
    """Downgrade schema - remove connector_schedules table and enum."""
    # Drop indexes
    op.drop_index("ix_connector_schedules_next_run_at", table_name="connector_schedules")
    op.drop_index("ix_connector_schedules_is_active", table_name="connector_schedules")
    op.drop_index("ix_connector_schedules_connector_id", table_name="connector_schedules")
    op.drop_index("ix_connector_schedules_created_at", table_name="connector_schedules")
    op.drop_index("ix_connector_schedules_id", table_name="connector_schedules")

    # Drop table
    op.drop_table("connector_schedules")

    # Drop enum
    op.execute("DROP TYPE IF EXISTS scheduletype")
```
File: FastAPI application entrypoint

```diff
@@ -1,4 +1,6 @@
-from contextlib import asynccontextmanager
+import asyncio
+import logging
+from contextlib import asynccontextmanager, suppress

 from fastapi import Depends, FastAPI
 from fastapi.middleware.cors import CORSMiddleware

@@ -7,15 +9,45 @@
 from app.config import config
 from app.db import User, create_db_and_tables, get_async_session
 from app.routes import router as crud_router
+from app.routes.connector_schedules_routes import router as connector_schedules_router
+from app.routes.scheduler_routes import router as scheduler_router
 from app.schemas import UserCreate, UserRead, UserUpdate
+from app.services.connector_scheduler_service import start_scheduler, stop_scheduler
 from app.users import SECRET, auth_backend, current_active_user, fastapi_users

+logger = logging.getLogger(__name__)
+

 @asynccontextmanager
 async def lifespan(app: FastAPI):
-    # Not needed if you setup a migration system like Alembic
+    """Application lifespan manager with scheduler integration."""
+    # Startup
+    logger.info("Starting SurfSense application...")
+
+    # Create database tables
     await create_db_and_tables()
+    logger.info("Database tables created/verified")
+
+    # Start the connector scheduler service
+    scheduler_task = asyncio.create_task(start_scheduler())
+    logger.info("Connector scheduler service started")
```
**Review comment on lines +31 to +34:**

**Avoid spawning an unobserved startup task; await and handle errors.** `create_task(start_scheduler())` can fail silently; the "started" log is emitted even if startup fails. Await it and log/propagate errors. Apply this diff:

```diff
-    # Start the connector scheduler service
-    scheduler_task = asyncio.create_task(start_scheduler())
-    logger.info("Connector scheduler service started")
+    # Start the connector scheduler service
+    try:
+        await start_scheduler()
+        logger.info("Connector scheduler service started")
+    except Exception:
+        logger.exception("Failed to start connector scheduler")
+        raise
```
The diff continues:

```diff
     yield

+    # Shutdown
+    logger.info("Shutting down SurfSense application...")
+
+    # Stop the scheduler service
+    await stop_scheduler()
+    logger.info("Connector scheduler service stopped")
+
+    # Cancel the scheduler task
+    if not scheduler_task.done():
+        scheduler_task.cancel()
+        with suppress(asyncio.CancelledError):
+            await scheduler_task
+
+    logger.info("Application shutdown complete")


 app = FastAPI(lifespan=lifespan)

@@ -65,6 +97,8 @@ async def lifespan(app: FastAPI):
 )

 app.include_router(crud_router, prefix="/api/v1", tags=["crud"])
+app.include_router(connector_schedules_router, prefix="/api/v1", tags=["connector-schedules"])
+app.include_router(scheduler_router, prefix="/api/v1", tags=["scheduler"])


 @app.get("/verify-token")
```
File: ClickUp connector (task fetching)

```diff
@@ -5,6 +5,7 @@
 Allows fetching tasks from workspaces and lists.
 """

+from datetime import datetime
 from typing import Any

 import requests

@@ -168,13 +169,28 @@ def get_tasks_in_date_range(
         Tuple containing (tasks list, error message or None)
     """
     try:
-        # TODO : Include date range in api request
+        # Convert date strings to Unix timestamps (milliseconds)
+        start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
+        end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
+
+        # Set time to start and end of day for complete coverage
+        start_datetime = start_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
+        end_datetime = end_datetime.replace(hour=23, minute=59, second=59, microsecond=999999)
+
+        start_timestamp = int(start_datetime.timestamp() * 1000)
+        end_timestamp = int(end_datetime.timestamp() * 1000)
```
**Review comment on lines +172 to +181:**

**Use UTC explicitly to avoid inconsistent filtering.** The code creates naive datetime objects (without timezone info) and calls `timestamp()` on them, which interprets the dates in the server's local timezone. The same date range can therefore map to different Unix timestamps on different hosts. Apply this diff to use UTC explicitly:

```diff
-        # Convert date strings to Unix timestamps (milliseconds)
-        start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
-        end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
-
-        # Set time to start and end of day for complete coverage
-        start_datetime = start_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
-        end_datetime = end_datetime.replace(hour=23, minute=59, second=59, microsecond=999999)
-
-        start_timestamp = int(start_datetime.timestamp() * 1000)
-        end_timestamp = int(end_datetime.timestamp() * 1000)
+        # Convert date strings to Unix timestamps (milliseconds) using UTC
+        from datetime import timezone
+
+        start_datetime = datetime.strptime(start_date, "%Y-%m-%d").replace(
+            hour=0, minute=0, second=0, microsecond=0, tzinfo=timezone.utc
+        )
+        end_datetime = datetime.strptime(end_date, "%Y-%m-%d").replace(
+            hour=23, minute=59, second=59, microsecond=999999, tzinfo=timezone.utc
+        )
+
+        start_timestamp = int(start_datetime.timestamp() * 1000)
+        end_timestamp = int(end_datetime.timestamp() * 1000)
```
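A standalone demonstration of the problem (not project code):

```python
from datetime import datetime, timezone

naive = datetime.strptime("2024-01-01", "%Y-%m-%d")
aware = naive.replace(tzinfo=timezone.utc)

# timestamp() interprets a naive datetime in the process's local timezone,
# so this value shifts with the server's TZ setting:
print(naive.timestamp())

# An aware datetime is unambiguous on every host:
print(aware.timestamp())  # 1704067200.0 (2024-01-01T00:00:00Z)
```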
The diff continues with the request parameters:

```diff
         params = {
             "page": 0,
             "order_by": "created",
             "reverse": "true",
             "subtasks": "true",
             "include_closed": str(include_closed).lower(),
+            # Date filtering - filter by both created and updated dates
+            "date_created_gt": start_timestamp,
+            "date_created_lt": end_timestamp,
+            "date_updated_gt": start_timestamp,
+            "date_updated_lt": end_timestamp,
```
**Review comment on lines +189 to +193:**

**Verify that the ClickUp API uses OR logic for multiple date filters.** The implementation sends all four date parameters (`date_created_gt`/`date_created_lt` and `date_updated_gt`/`date_updated_lt`) simultaneously. Depending on how the ClickUp API interprets these parameters, this could either return only tasks that were *both* created and updated inside the range (AND semantics, overly restrictive) or tasks matching *either* filter (OR semantics, the intended behavior). The PR objectives mention Jira uses explicit OR logic; verify the ClickUp API behavior to ensure incremental synchronization works correctly.

**Resolution:** handle ClickUp date filters with OR logic by splitting requests.
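A minimal sketch of that request-splitting approach (shape only: `fetch` stands in for the connector's paginated request helper, which this diff does not show):

```python
from collections.abc import Callable, Iterable
from typing import Any

Task = dict[str, Any]


def fetch_tasks_or_filtered(
    fetch: Callable[[dict[str, Any]], Iterable[Task]],
    base_params: dict[str, Any],
    start_ts: int,
    end_ts: int,
) -> list[Task]:
    """Fetch tasks created OR updated in [start_ts, end_ts] via two requests."""
    created = {**base_params, "date_created_gt": start_ts, "date_created_lt": end_ts}
    updated = {**base_params, "date_updated_gt": start_ts, "date_updated_lt": end_ts}

    merged: dict[str, Task] = {}
    for params in (created, updated):
        for task in fetch(params):  # `fetch` wraps the paginated ClickUp request
            merged[task["id"]] = task  # de-duplicate tasks matching both filters
    return list(merged.values())


# Usage with a stub fetcher:
if __name__ == "__main__":
    def stub_fetch(params: dict[str, Any]) -> list[Task]:
        return [{"id": "a", "name": "demo task"}]

    print(fetch_tasks_or_filtered(stub_fetch, {"page": 0}, 0, 10**13))
```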
The diff continues:

```diff
         }

         all_tasks = []
```
File: SQLAlchemy models

```diff
@@ -16,6 +16,7 @@
     Integer,
     String,
     Text,
+    Time,
     UniqueConstraint,
     text,
 )

@@ -129,6 +130,13 @@ class LogStatus(str, Enum):
     FAILED = "FAILED"


+class ScheduleType(str, Enum):
+    HOURLY = "HOURLY"
+    DAILY = "DAILY"
+    WEEKLY = "WEEKLY"
+    CUSTOM = "CUSTOM"
+
+
 class Base(DeclarativeBase):
     pass


@@ -245,21 +253,10 @@ class SearchSpace(BaseModel, TimestampMixin):
         order_by="Log.id",
         cascade="all, delete-orphan",
     )
-    search_source_connectors = relationship(
-        "SearchSourceConnector",
-        back_populates="search_space",
-        order_by="SearchSourceConnector.id",
-        cascade="all, delete-orphan",
-    )
-    llm_configs = relationship(
-        "LLMConfig",
-        back_populates="search_space",
-        order_by="LLMConfig.id",
-        cascade="all, delete-orphan",
-    )
-    user_preferences = relationship(
-        "UserSearchSpacePreference",
+    connector_schedules = relationship(
+        "ConnectorSchedule",
         back_populates="search_space",
+        order_by="ConnectorSchedule.id",
         cascade="all, delete-orphan",
     )
```
**Review comment on lines +256 to 261:**

**Fix broken SQLAlchemy relationships (missing `back_populates` targets).** Several relationships reference `back_populates` attributes that don't exist, causing mapper configuration errors at startup. Apply these additions:

```diff
 @@ class SearchSpace(BaseModel, TimestampMixin):
     logs = relationship(
         "Log",
         back_populates="search_space",
         order_by="Log.id",
         cascade="all, delete-orphan",
     )
+    # Add missing relationships for back_populates used elsewhere
+    search_source_connectors = relationship(
+        "SearchSourceConnector",
+        back_populates="search_space",
+        order_by="SearchSourceConnector.id",
+        cascade="all, delete-orphan",
+    )
     connector_schedules = relationship(
         "ConnectorSchedule",
         back_populates="search_space",
         order_by="ConnectorSchedule.id",
         cascade="all, delete-orphan",
     )
+    llm_configs = relationship(
+        "LLMConfig",
+        back_populates="search_space",
+        order_by="LLMConfig.id",
+        cascade="all, delete-orphan",
+    )
+    user_preferences = relationship(
+        "UserSearchSpacePreference",
+        back_populates="search_space",
+        order_by="UserSearchSpacePreference.id",
+        cascade="all, delete-orphan",
+    )

 @@ class SearchSourceConnector(BaseModel, TimestampMixin):
-    search_space = relationship(
-        "SearchSpace", back_populates="search_source_connectors"
-    )
+    search_space = relationship("SearchSpace", back_populates="search_source_connectors")

 @@ class User(SQLAlchemyBaseUserTableUUID, Base):  # GOOGLE path
     search_spaces = relationship("SearchSpace", back_populates="user")
     search_space_preferences = relationship(
         "UserSearchSpacePreference",
         back_populates="user",
         cascade="all, delete-orphan",
     )
+    search_source_connectors = relationship(
+        "SearchSourceConnector",
+        back_populates="user",
+    )

 @@ else path: class User(SQLAlchemyBaseUserTableUUID, Base):
     search_spaces = relationship("SearchSpace", back_populates="user")
     search_space_preferences = relationship(
         "UserSearchSpacePreference",
         back_populates="user",
         cascade="all, delete-orphan",
     )
+    search_source_connectors = relationship(
+        "SearchSourceConnector",
+        back_populates="user",
+    )
```

Also applies to lines 284-291, 333-356, 391-392, 424-444.
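For readers less familiar with SQLAlchemy, here is a minimal standalone sketch (illustrative model names, not the project's full models) of why paired `back_populates` attributes must both exist:

```python
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import DeclarativeBase, relationship


class Base(DeclarativeBase):
    pass


class SearchSpace(Base):
    __tablename__ = "searchspaces"
    id = Column(Integer, primary_key=True)
    # Must exist because ConnectorSchedule.search_space points back at it:
    connector_schedules = relationship(
        "ConnectorSchedule", back_populates="search_space"
    )


class ConnectorSchedule(Base):
    __tablename__ = "connector_schedules"
    id = Column(Integer, primary_key=True)
    search_space_id = Column(Integer, ForeignKey("searchspaces.id"))
    # back_populates must name an existing attribute on the other class;
    # otherwise mapper configuration raises InvalidRequestError at startup:
    search_space = relationship("SearchSpace", back_populates="connector_schedules")


# Forces mapper configuration immediately; a missing back_populates
# target would raise right here rather than at first query:
Base.registry.configure()
```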
The models diff continues:

```diff
@@ -291,6 +288,46 @@ class SearchSourceConnector(BaseModel, TimestampMixin):
     user_id = Column(
         UUID(as_uuid=True), ForeignKey("user.id", ondelete="CASCADE"), nullable=False
     )
+    user = relationship("User", back_populates="search_source_connectors")
+    schedules = relationship(
+        "ConnectorSchedule",
+        back_populates="connector",
+        cascade="all, delete-orphan",
+    )
+
+
+class ConnectorSchedule(BaseModel, TimestampMixin):
+    __tablename__ = "connector_schedules"
+    __table_args__ = (
+        UniqueConstraint(
+            "connector_id", "search_space_id", name="uq_connector_search_space"
+        ),
+    )
+
+    connector_id = Column(
+        Integer,
+        ForeignKey("search_source_connectors.id", ondelete="CASCADE"),
+        nullable=False,
+        index=True,
+    )
+    search_space_id = Column(
+        Integer,
+        ForeignKey("searchspaces.id", ondelete="CASCADE"),
+        nullable=False,
+    )
+    schedule_type = Column(SQLAlchemyEnum(ScheduleType), nullable=False)
+    cron_expression = Column(String(100), nullable=True)
+    # Optional schedule config fields (persist user selections)
+    daily_time = Column(Time(timezone=False), nullable=True)
+    weekly_day = Column(Integer, nullable=True)  # 0=Mon .. 6=Sun
+    weekly_time = Column(Time(timezone=False), nullable=True)
+    hourly_minute = Column(Integer, nullable=True)  # 0..59
+    is_active = Column(Boolean, nullable=False, default=True, index=True)
+    last_run_at = Column(TIMESTAMP(timezone=True), nullable=True)
+    next_run_at = Column(TIMESTAMP(timezone=True), nullable=True, index=True)
+
+    connector = relationship("SearchSourceConnector", back_populates="schedules")
+    search_space = relationship("SearchSpace", back_populates="connector_schedules")
+
+
 class LLMConfig(BaseModel, TimestampMixin):
```
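These fields imply scheduling logic along the following lines. This is a hypothetical sketch only; the real computation presumably lives in `connector_scheduler_service`, which this diff does not show:

```python
from datetime import datetime, time, timedelta


def compute_next_run(
    schedule_type: str,
    now: datetime,
    hourly_minute: int | None = None,
    daily_time: time | None = None,
    weekly_day: int | None = None,  # 0=Mon .. 6=Sun, matching the column comment
    weekly_time: time | None = None,
) -> datetime:
    """Return the first run time strictly after `now` for non-CUSTOM schedules."""
    if schedule_type == "HOURLY":
        candidate = now.replace(minute=hourly_minute or 0, second=0, microsecond=0)
        return candidate if candidate > now else candidate + timedelta(hours=1)
    if schedule_type == "DAILY":
        t = daily_time or time(0, 0)
        candidate = now.replace(hour=t.hour, minute=t.minute, second=0, microsecond=0)
        return candidate if candidate > now else candidate + timedelta(days=1)
    if schedule_type == "WEEKLY":
        t = weekly_time or time(0, 0)
        days_ahead = ((weekly_day or 0) - now.weekday()) % 7
        candidate = (now + timedelta(days=days_ahead)).replace(
            hour=t.hour, minute=t.minute, second=0, microsecond=0
        )
        return candidate if candidate > now else candidate + timedelta(days=7)
    raise ValueError("CUSTOM schedules require parsing cron_expression instead")
```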
**Review comment:**

**Add a missing `updated_at` column to match the ORM's `TimestampMixin`.** The `ConnectorSchedule` model inherits `TimestampMixin` (`created_at`, `updated_at`), but the table DDL creates only `created_at` and omits `updated_at`, which will cause runtime SQL errors (column not found) when the ORM selects or updates it. Apply this diff:

```diff
 CREATE TABLE IF NOT EXISTS connector_schedules (
     id SERIAL PRIMARY KEY,
     created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
     connector_id INTEGER NOT NULL REFERENCES search_source_connectors(id) ON DELETE CASCADE,
     search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE,
     schedule_type scheduletype NOT NULL,
```

Optional: the explicit index on `id` is redundant (the primary key is already indexed); consider dropping the creation of `ix_connector_schedules_id`.
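For context, a `TimestampMixin` in a setup like this typically looks roughly like the sketch below (an assumption; the repository's actual mixin is not shown in this diff). Whatever its exact form, the ORM includes both columns in generated SQL, so the table must define both:

```python
from sqlalchemy import TIMESTAMP, Column, func


class TimestampMixin:
    """Adds created_at/updated_at; SQLAlchemy copies mixin Columns onto each
    inheriting model's table, so every subclass expects both columns to exist."""

    created_at = Column(
        TIMESTAMP(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at = Column(
        TIMESTAMP(timezone=True),
        nullable=False,
        server_default=func.now(),
        onupdate=func.now(),  # refreshed by the ORM on UPDATE statements
    )
```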