diff --git a/surfsense_backend/README.md b/surfsense_backend/README.md deleted file mode 100644 index f78ec7df..00000000 --- a/surfsense_backend/README.md +++ /dev/null @@ -1,118 +0,0 @@ -# Surf Backend - -## Technology Stack Overview - -This application is a modern AI-powered search and knowledge management platform built with the following technology stack: - -### Core Framework and Environment -- **Python 3.12+**: The application requires Python 3.12 or newer -- **FastAPI**: Modern, fast web framework for building APIs with Python -- **Uvicorn**: ASGI server implementation, running the FastAPI application -- **PostgreSQL with pgvector**: Database with vector search capabilities for similarity searches -- **SQLAlchemy**: SQL toolkit and ORM (Object-Relational Mapping) for database interactions -- **FastAPI Users**: Authentication and user management with JWT and OAuth support - -### Key Features and Components - -#### Authentication and User Management -- JWT-based authentication -- OAuth integration (Google) -- User registration, login, and password reset flows - -#### Search and Retrieval System -- **Hybrid Search**: Combines vector similarity and full-text search for optimal results using Reciprocal Rank Fusion (RRF) -- **Vector Embeddings**: Document and text embeddings for semantic search -- **pgvector**: PostgreSQL extension for efficient vector similarity operations -- **Chonkie**: Advanced document chunking and embedding library - - Uses `AutoEmbeddings` for flexible embedding model selection - - `LateChunker` for optimized document chunking based on embedding model's max sequence length - -#### AI and NLP Capabilities -- **LangChain**: Framework for developing AI-powered applications - - Used for document processing, research, and response generation - - Integration with various LLM models through LiteLLM - - Document conversion utilities for standardized processing -- **GPT Integration**: Integration with LLM models through LiteLLM - - Multiple LLM configurations for different use cases: - - Fast LLM: Quick responses (default: gpt-4o-mini) - - Smart LLM: More comprehensive analysis (default: gpt-4o-mini) - - Strategic LLM: Complex reasoning (default: gpt-4o-mini) - - Long Context LLM: For processing large documents (default: gemini-2.0-flash-thinking) -- **Rerankers with FlashRank**: Advanced result ranking for improved search relevance - - Configurable reranking models (default: ms-marco-MiniLM-L-12-v2) - - Supports multiple reranking backends (FlashRank, Cohere, etc.) 
- - Improves search result quality by reordering based on semantic relevance -- **GPT-Researcher**: Advanced research capabilities - - Multiple research modes (GENERAL, DEEP, DEEPER) - - Customizable report formats with proper citations - - Streaming research results for real-time updates - -#### External Integrations -- **Slack Connector**: Integration with Slack for data retrieval and notifications -- **Notion Connector**: Integration with Notion for document retrieval -- **Search APIs**: Integration with Tavily and Serper API for web search -- **Firecrawl**: Web crawling and data extraction capabilities - -#### Data Processing -- **Unstructured**: Tools for processing unstructured data -- **Markdownify**: Converting HTML to Markdown -- **Playwright**: Web automation and scraping capabilities - -#### Main Modules -- **Search Spaces**: Isolated search environments for different contexts or projects -- **Documents**: Storage and retrieval of various document types -- **Chunks**: Document fragments for more precise retrieval -- **Chats**: Conversation management with different depth levels (GENERAL, DEEP) -- **Podcasts**: Audio content management with generation capabilities -- **Search Source Connectors**: Integration with various data sources - -### Development Tools -- **Poetry**: Python dependency management (indicated by pyproject.toml) -- **CORS support**: Cross-Origin Resource Sharing enabled for API access -- **Environment Variables**: Configuration through .env files - -## Database Schema - -The application uses a relational database with the following main entities: -- Users: Authentication and user management -- SearchSpaces: Isolated search environments owned by users -- Documents: Various document types with content and embeddings -- Chunks: Smaller pieces of documents for granular retrieval -- Chats: Conversation tracking with different depth levels -- Podcasts: Audio content with generation capabilities -- SearchSourceConnectors: External data source integrations - -## API Endpoints - -The API is structured with the following main route groups: -- `/auth/*`: Authentication endpoints (JWT, OAuth) -- `/users/*`: User management -- `/api/v1/search-spaces/*`: Search space management -- `/api/v1/documents/*`: Document management -- `/api/v1/podcasts/*`: Podcast functionality -- `/api/v1/chats/*`: Chat and conversation endpoints -- `/api/v1/search-source-connectors/*`: External data source management - -## Deployment - -The application is configured to run with Uvicorn and can be deployed with: -``` -python main.py -``` - -This will start the server on all interfaces (0.0.0.0) with info-level logging. - -## Requirements - -See pyproject.toml for detailed dependency information. Key dependencies include: -- asyncpg: Asynchronous PostgreSQL client -- chonkie: Document chunking and embedding library -- fastapi and related packages -- fastapi-users: Authentication and user management -- firecrawl-py: Web crawling capabilities -- langchain components for AI workflows -- litellm: LLM model integration -- pgvector: Vector similarity search in PostgreSQL -- rerankers with FlashRank: Advanced result ranking -- Various AI and NLP libraries -- Integration clients for Slack, Notion, etc. 
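Editorial note on the README removed above: its hybrid-search bullet credits Reciprocal Rank Fusion (RRF) with merging vector-similarity and full-text results. For readers losing that README, here is a minimal, self-contained sketch of the general RRF technique. It is an illustration, not this codebase's actual implementation; the `k = 60` constant is the conventional default from the RRF literature, not a value taken from SurfSense.

```python
def reciprocal_rank_fusion(
    rankings: list[list[str]], k: int = 60
) -> list[tuple[str, float]]:
    """Fuse several ranked lists of document IDs into one ranking.

    Each ID is scored by sum(1 / (k + rank)) over every list it
    appears in, so items ranked highly by multiple retrievers rise
    to the top even when their raw scores are incomparable.
    """
    scores: dict[str, float] = {}
    for ranking in rankings:
        for rank, doc_id in enumerate(ranking, start=1):
            scores[doc_id] = scores.get(doc_id, 0.0) + 1.0 / (k + rank)
    return sorted(scores.items(), key=lambda item: item[1], reverse=True)


# e.g. fuse a vector-similarity ranking with a full-text ranking:
fused = reciprocal_rank_fusion([["d2", "d1", "d3"], ["d1", "d3", "d2"]])
```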
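The removed README's deployment section says `python main.py` starts the server on all interfaces with info-level logging. As a reference, a minimal entry point consistent with that description might look like the sketch below; the module layout, app title, and port 8000 are assumptions, since the actual `main.py` is not shown in this diff.

```python
# main.py -- hypothetical entry point matching the deployment note above.
# Only the host/log-level settings mirror the removed README; everything
# else (port, app wiring) is assumed for illustration.
import uvicorn
from fastapi import FastAPI

app = FastAPI(title="SurfSense Backend")

if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=8000, log_level="info")
```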
diff --git a/surfsense_backend/app/agents/researcher/configuration.py b/surfsense_backend/app/agents/researcher/configuration.py index 24d8c819..157d56b2 100644 --- a/surfsense_backend/app/agents/researcher/configuration.py +++ b/surfsense_backend/app/agents/researcher/configuration.py @@ -15,27 +15,16 @@ class SearchMode(Enum): DOCUMENTS = "DOCUMENTS" -class ResearchMode(Enum): - """Enum defining the type of research mode.""" - - QNA = "QNA" - REPORT_GENERAL = "REPORT_GENERAL" - REPORT_DEEP = "REPORT_DEEP" - REPORT_DEEPER = "REPORT_DEEPER" - - @dataclass(kw_only=True) class Configuration: """The configuration for the agent.""" # Input parameters provided at invocation user_query: str - num_sections: int connectors_to_search: list[str] user_id: str search_space_id: int search_mode: SearchMode - research_mode: ResearchMode document_ids_to_add_in_context: list[int] language: str | None = None diff --git a/surfsense_backend/app/agents/researcher/graph.py b/surfsense_backend/app/agents/researcher/graph.py index b3ffadd1..be2a1cff 100644 --- a/surfsense_backend/app/agents/researcher/graph.py +++ b/surfsense_backend/app/agents/researcher/graph.py @@ -1,34 +1,23 @@ -from typing import Any, TypedDict - from langgraph.graph import StateGraph -from .configuration import Configuration, ResearchMode +from .configuration import Configuration from .nodes import ( generate_further_questions, handle_qna_workflow, - process_sections, reformulate_user_query, - write_answer_outline, ) from .state import State -# Define what keys are in our state dict -class GraphState(TypedDict): - # Intermediate data produced during workflow - answer_outline: Any | None - # Final output - final_written_report: str | None - - def build_graph(): """ Build and return the LangGraph workflow. - This function constructs the researcher agent graph with conditional routing - based on research_mode - QNA mode uses a direct Q&A workflow while other modes - use the full report generation pipeline. Both paths generate follow-up questions - at the end using the reranked documents from the sub-agents. + This function constructs the researcher agent graph for Q&A workflow. + The workflow follows a simple path: + 1. Reformulate user query based on chat history + 2. Handle QNA workflow (fetch documents and generate answer) + 3. 
Generate follow-up questions Returns: A compiled LangGraph workflow @@ -39,40 +28,12 @@ def build_graph(): # Add nodes to the graph workflow.add_node("reformulate_user_query", reformulate_user_query) workflow.add_node("handle_qna_workflow", handle_qna_workflow) - workflow.add_node("write_answer_outline", write_answer_outline) - workflow.add_node("process_sections", process_sections) workflow.add_node("generate_further_questions", generate_further_questions) - # Define the edges + # Define the edges - simple linear flow for QNA workflow.add_edge("__start__", "reformulate_user_query") - - # Add conditional edges from reformulate_user_query based on research mode - def route_after_reformulate(state: State, config) -> str: - """Route based on research_mode after reformulating the query.""" - configuration = Configuration.from_runnable_config(config) - - if configuration.research_mode == ResearchMode.QNA.value: - return "handle_qna_workflow" - else: - return "write_answer_outline" - - workflow.add_conditional_edges( - "reformulate_user_query", - route_after_reformulate, - { - "handle_qna_workflow": "handle_qna_workflow", - "write_answer_outline": "write_answer_outline", - }, - ) - - # QNA workflow path: handle_qna_workflow -> generate_further_questions -> __end__ + workflow.add_edge("reformulate_user_query", "handle_qna_workflow") workflow.add_edge("handle_qna_workflow", "generate_further_questions") - - # Report generation workflow path: write_answer_outline -> process_sections -> generate_further_questions -> __end__ - workflow.add_edge("write_answer_outline", "process_sections") - workflow.add_edge("process_sections", "generate_further_questions") - - # Both paths end after generating further questions workflow.add_edge("generate_further_questions", "__end__") # Compile the workflow into an executable graph diff --git a/surfsense_backend/app/agents/researcher/nodes.py b/surfsense_backend/app/agents/researcher/nodes.py index 7824e06e..6ac31a0f 100644 --- a/surfsense_backend/app/agents/researcher/nodes.py +++ b/surfsense_backend/app/agents/researcher/nodes.py @@ -1,4 +1,3 @@ -import asyncio import json import logging import traceback @@ -17,15 +16,10 @@ from app.services.query_service import QueryService from .configuration import Configuration, SearchMode -from .prompts import ( - get_answer_outline_system_prompt, - get_further_questions_system_prompt, -) +from .prompts import get_further_questions_system_prompt from .qna_agent.graph import graph as qna_agent_graph from .state import State -from .sub_section_writer.configuration import SubSectionType -from .sub_section_writer.graph import graph as sub_section_writer_graph -from .utils import AnswerOutline, get_connector_emoji, get_connector_friendly_name +from .utils import get_connector_emoji, get_connector_friendly_name def extract_sources_from_documents( @@ -519,156 +513,6 @@ async def fetch_documents_by_ids( return [], [] -async def write_answer_outline( - state: State, config: RunnableConfig, writer: StreamWriter -) -> dict[str, Any]: - """ - Create a structured answer outline based on the user query. - - This node takes the user query and number of sections from the configuration and uses - an LLM to generate a comprehensive outline with logical sections and research questions - for each section. - - Returns: - Dict containing the answer outline in the "answer_outline" key for state update. 
- """ - from app.services.llm_service import get_user_strategic_llm - - streaming_service = state.streaming_service - - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - "🔍 Generating answer outline..." - ) - } - ) - # Get configuration from runnable config - configuration = Configuration.from_runnable_config(config) - reformulated_query = state.reformulated_query - user_query = configuration.user_query - num_sections = configuration.num_sections - user_id = configuration.user_id - search_space_id = configuration.search_space_id - language = configuration.language # Get language from configuration - - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - f'🤔 Planning research approach for: "{user_query[:100]}..."' - ) - } - ) - - # Get user's strategic LLM - llm = await get_user_strategic_llm(state.db_session, user_id, search_space_id) - if not llm: - error_message = f"No strategic LLM configured for user {user_id} in search space {search_space_id}" - writer({"yield_value": streaming_service.format_error(error_message)}) - raise RuntimeError(error_message) - - # Create the human message content - human_message_content = f""" - Now Please create an answer outline for the following query: - - User Query: {reformulated_query} - Number of Sections: {num_sections} - - Remember to format your response as valid JSON exactly matching this structure: - {{ - "answer_outline": [ - {{ - "section_id": 0, - "section_title": "Section Title", - "questions": [ - "Question 1 to research for this section", - "Question 2 to research for this section" - ] - }} - ] - }} - - Your output MUST be valid JSON in exactly this format. Do not include any other text or explanation. - """ - - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - "📝 Designing structured outline with AI..." - ) - } - ) - - # Create messages for the LLM - messages = [ - SystemMessage(content=get_answer_outline_system_prompt(language=language)), - HumanMessage(content=human_message_content), - ] - - # Call the LLM directly without using structured output - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - "⚙️ Processing answer structure..." - ) - } - ) - - response = await llm.ainvoke(messages) - - # Parse the JSON response manually - try: - # Extract JSON content from the response - content = response.content - - # Find the JSON in the content (handle case where LLM might add additional text) - json_start = content.find("{") - json_end = content.rfind("}") + 1 - if json_start >= 0 and json_end > json_start: - json_str = content[json_start:json_end] - - # Parse the JSON string - parsed_data = json.loads(json_str) - - # Convert to Pydantic model - answer_outline = AnswerOutline(**parsed_data) - - total_questions = sum( - len(section.questions) for section in answer_outline.answer_outline - ) - - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - f"✅ Successfully generated outline with {len(answer_outline.answer_outline)} sections and {total_questions} research questions!" - ) - } - ) - - print( - f"Successfully generated answer outline with {len(answer_outline.answer_outline)} sections" - ) - - # Return state update - return {"answer_outline": answer_outline} - else: - # If JSON structure not found, raise a clear error - error_message = ( - f"Could not find valid JSON in LLM response. 
Raw response: {content}" - ) - writer({"yield_value": streaming_service.format_error(error_message)}) - raise ValueError(error_message) - - except (json.JSONDecodeError, ValueError) as e: - # Log the error and re-raise it - error_message = f"Error parsing LLM response: {e!s}" - writer({"yield_value": streaming_service.format_error(error_message)}) - - print(f"Error parsing LLM response: {e!s}") - print(f"Raw response: {response.content}") - raise - - async def fetch_relevant_documents( research_questions: list[str], user_id: str, @@ -1453,439 +1297,6 @@ async def fetch_relevant_documents( return deduplicated_docs -async def process_sections( - state: State, config: RunnableConfig, writer: StreamWriter -) -> dict[str, Any]: - """ - Process all sections in parallel and combine the results. - - This node takes the answer outline from the previous step, fetches relevant documents - for all questions across all sections once, and then processes each section in parallel - using the sub_section_writer graph with the shared document pool. - - Returns: - Dict containing the final written report in the "final_written_report" key. - """ - # Get configuration and answer outline from state - configuration = Configuration.from_runnable_config(config) - answer_outline = state.answer_outline - streaming_service = state.streaming_service - - # Initialize a dictionary to track content for all sections - # This is used to maintain section content while streaming multiple sections - section_contents = {} - - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - "🚀 Starting to process research sections..." - ) - } - ) - - print(f"Processing sections from outline: {answer_outline is not None}") - - if not answer_outline: - error_message = "No answer outline was provided. Cannot generate report." - writer({"yield_value": streaming_service.format_error(error_message)}) - return { - "final_written_report": "No answer outline was provided. Cannot generate final report." - } - - # Collect all questions from all sections - all_questions = [] - for section in answer_outline.answer_outline: - all_questions.extend(section.questions) - - print(f"Collected {len(all_questions)} questions from all sections") - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - f"🧩 Found {len(all_questions)} research questions across {len(answer_outline.answer_outline)} sections" - ) - } - ) - - # Fetch relevant documents once for all questions - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - "🔍 Searching for relevant information across all connectors..." - ) - } - ) - - if configuration.num_sections == 1: - top_k = 10 - elif configuration.num_sections == 3: - top_k = 20 - elif configuration.num_sections == 6: - top_k = 30 - else: - top_k = 10 - - relevant_documents = [] - user_selected_documents = [] - user_selected_sources = [] - - try: - # First, fetch user-selected documents if any - if configuration.document_ids_to_add_in_context: - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - f"📋 Including {len(configuration.document_ids_to_add_in_context)} user-selected documents..." 
- ) - } - ) - - ( - user_selected_sources, - user_selected_documents, - ) = await fetch_documents_by_ids( - document_ids=configuration.document_ids_to_add_in_context, - user_id=configuration.user_id, - db_session=state.db_session, - ) - - if user_selected_documents: - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - f"✅ Successfully added {len(user_selected_documents)} user-selected documents to context" - ) - } - ) - - # Create connector service using state db_session - connector_service = ConnectorService( - state.db_session, user_id=configuration.user_id - ) - await connector_service.initialize_counter() - - relevant_documents = await fetch_relevant_documents( - research_questions=all_questions, - user_id=configuration.user_id, - search_space_id=configuration.search_space_id, - db_session=state.db_session, - connectors_to_search=configuration.connectors_to_search, - writer=writer, - state=state, - top_k=top_k, - connector_service=connector_service, - search_mode=configuration.search_mode, - user_selected_sources=user_selected_sources, - ) - except Exception as e: - error_message = f"Error fetching relevant documents: {e!s}" - print(error_message) - writer({"yield_value": streaming_service.format_error(error_message)}) - # Log the error and continue with an empty list of documents - # This allows the process to continue, but the report might lack information - relevant_documents = [] - - # Combine user-selected documents with connector-fetched documents - all_documents = user_selected_documents + relevant_documents - - print(f"Fetched {len(relevant_documents)} relevant documents for all sections") - print( - f"Added {len(user_selected_documents)} user-selected documents for all sections" - ) - print(f"Total documents for sections: {len(all_documents)}") - - # Extract and stream sources from all_documents - if all_documents: - sources_to_stream = extract_sources_from_documents(all_documents) - writer( - {"yield_value": streaming_service.format_sources_delta(sources_to_stream)} - ) - - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - f"✨ Starting to draft {len(answer_outline.answer_outline)} sections using {len(all_documents)} total document chunks ({len(user_selected_documents)} user-selected + {len(relevant_documents)} connector-found)" - ) - } - ) - - # Create tasks to process each section in parallel with the same document set - section_tasks = [] - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - "⚙️ Creating processing tasks for each section..." 
- ) - } - ) - - for i, section in enumerate(answer_outline.answer_outline): - if i == 0: - sub_section_type = SubSectionType.START - elif i == len(answer_outline.answer_outline) - 1: - sub_section_type = SubSectionType.END - else: - sub_section_type = SubSectionType.MIDDLE - - # Initialize the section_contents entry for this section - section_contents[i] = { - "title": section.section_title, - "content": "", - "index": i, - } - - section_tasks.append( - process_section_with_documents( - section_id=i, - section_title=section.section_title, - section_questions=section.questions, - user_query=configuration.user_query, - user_id=configuration.user_id, - search_space_id=configuration.search_space_id, - relevant_documents=all_documents, # Use combined documents - state=state, - writer=writer, - sub_section_type=sub_section_type, - section_contents=section_contents, - ) - ) - - # Run all section processing tasks in parallel - print(f"Running {len(section_tasks)} section processing tasks in parallel") - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - f"⏳ Processing {len(section_tasks)} sections simultaneously..." - ) - } - ) - - section_results = await asyncio.gather(*section_tasks, return_exceptions=True) - - # Handle any exceptions in the results - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - "🧵 Combining section results into final report..." - ) - } - ) - - processed_results = [] - for i, result in enumerate(section_results): - if isinstance(result, Exception): - section_title = answer_outline.answer_outline[i].section_title - error_message = f"Error processing section '{section_title}': {result!s}" - print(error_message) - writer({"yield_value": streaming_service.format_error(error_message)}) - processed_results.append(error_message) - else: - processed_results.append(result) - - # Combine the results into a final report with section titles - final_report = [] - for _i, (section, content) in enumerate( - zip(answer_outline.answer_outline, processed_results, strict=False) - ): - # Skip adding the section header since the content already contains the title - final_report.append(content) - final_report.append("\n") - - # Stream each section with its title - writer( - { - "yield_value": state.streaming_service.format_text_chunk( - f"# {section.section_title}\n\n{content}" - ) - } - ) - - # Join all sections with newlines - final_written_report = "\n".join(final_report) - print(f"Generated final report with {len(final_report)} parts") - - writer( - { - "yield_value": streaming_service.format_terminal_info_delta( - "🎉 Final research report generated successfully!" - ) - } - ) - - # Use the shared documents for further question generation - # Since all sections used the same document pool, we can use it directly - return { - "final_written_report": final_written_report, - "reranked_documents": all_documents, - } - - -async def process_section_with_documents( - section_id: int, - section_title: str, - section_questions: list[str], - user_id: str, - search_space_id: int, - relevant_documents: list[dict[str, Any]], - user_query: str, - state: State = None, - writer: StreamWriter = None, - sub_section_type: SubSectionType = SubSectionType.MIDDLE, - section_contents: dict[int, dict[str, Any]] | None = None, -) -> str: - """ - Process a single section using pre-fetched documents. 
- - Args: - section_id: The ID of the section - section_title: The title of the section - section_questions: List of research questions for this section - user_id: The user ID - search_space_id: The search space ID - relevant_documents: Pre-fetched documents to use for this section - state: The current state - writer: StreamWriter for sending progress updates - sub_section_type: The type of section (start, middle, end) - section_contents: Dictionary to track content across multiple sections - - Returns: - The written section content - """ - try: - # Use the provided documents - documents_to_use = relevant_documents - - # Send status update via streaming if available - if state and state.streaming_service and writer: - writer( - { - "yield_value": state.streaming_service.format_terminal_info_delta( - f'📝 Writing section: "{section_title}" with {len(section_questions)} research questions' - ) - } - ) - - # Fallback if no documents found - if not documents_to_use: - print(f"No relevant documents found for section: {section_title}") - if state and state.streaming_service and writer: - writer( - { - "yield_value": state.streaming_service.format_terminal_info_delta( - f'📝 Writing section "{section_title}" using general knowledge (no specific sources found)' - ) - } - ) - - documents_to_use = [ - {"content": f"No specific information was found for: {question}"} - for question in section_questions - ] - - # Call the sub_section_writer graph with the appropriate config - config = { - "configurable": { - "sub_section_title": section_title, - "sub_section_questions": section_questions, - "sub_section_type": sub_section_type, - "user_query": user_query, - "relevant_documents": documents_to_use, - "user_id": user_id, - "search_space_id": search_space_id, - } - } - - # Create the initial state with db_session and chat_history - sub_state = {"db_session": state.db_session, "chat_history": state.chat_history} - - # Invoke the sub-section writer graph with streaming - print(f"Invoking sub_section_writer for: {section_title}") - if state and state.streaming_service and writer: - writer( - { - "yield_value": state.streaming_service.format_terminal_info_delta( - f'🧠 Analyzing information and drafting content for section: "{section_title}"' - ) - } - ) - - # Variables to track streaming state - complete_content = "" # Tracks the complete content received so far - - async for _chunk_type, chunk in sub_section_writer_graph.astream( - sub_state, config, stream_mode=["values"] - ): - if "final_answer" in chunk: - new_content = chunk["final_answer"] - if new_content and new_content != complete_content: - # Extract only the new content (delta) - delta = new_content[len(complete_content) :] - - # Update what we've processed so far - complete_content = new_content - - # Only stream if there's actual new content - if delta and state and state.streaming_service and writer: - # Update terminal with real-time progress indicator - writer( - { - "yield_value": state.streaming_service.format_terminal_info_delta( - f"✍️ Writing section {section_id + 1}... 
({len(complete_content.split())} words)" - ) - } - ) - - # Update section_contents with just the new delta - section_contents[section_id]["content"] += delta - - # Build UI-friendly content for all sections - complete_answer = [] - for i in range(len(section_contents)): - if i in section_contents and section_contents[i]["content"]: - # Add section header - complete_answer.append( - f"# {section_contents[i]['title']}" - ) - complete_answer.append("") # Empty line after title - - # Add section content - content_lines = section_contents[i]["content"].split( - "\n" - ) - complete_answer.extend(content_lines) - complete_answer.append("") # Empty line after content - - # Set default if no content was received - if not complete_content: - complete_content = "No content was generated for this section." - section_contents[section_id]["content"] = complete_content - - # Final terminal update - if state and state.streaming_service and writer: - writer( - { - "yield_value": state.streaming_service.format_terminal_info_delta( - f'✅ Completed section: "{section_title}"' - ) - } - ) - - return complete_content - except Exception as e: - print(f"Error processing section '{section_title}': {e!s}") - - # Send error update via streaming if available - if state and state.streaming_service and writer: - writer( - { - "yield_value": state.streaming_service.format_error( - f'Error processing section "{section_title}": {e!s}' - ) - } - ) - - return f"Error processing section: {section_title}. Details: {e!s}" - - async def reformulate_user_query( state: State, config: RunnableConfig, writer: StreamWriter ) -> dict[str, Any]: @@ -2133,7 +1544,7 @@ async def generate_further_questions( """ Generate contextually relevant follow-up questions based on chat history and available documents. - This node takes the chat history and reranked documents from sub-agents (qna_agent or sub_section_writer) + This node takes the chat history and reranked documents from the QNA agent and uses an LLM to generate follow-up questions that would naturally extend the conversation and provide additional value to the user. diff --git a/surfsense_backend/app/agents/researcher/prompts.py b/surfsense_backend/app/agents/researcher/prompts.py index 825772a2..96de760b 100644 --- a/surfsense_backend/app/agents/researcher/prompts.py +++ b/surfsense_backend/app/agents/researcher/prompts.py @@ -2,105 +2,12 @@ def _build_language_instruction(language: str | None = None): + """Build language instruction for prompts.""" if language: return f"\n\nIMPORTANT: Please respond in {language} language. All your responses, explanations, and analysis should be written in {language}." return "" -def get_answer_outline_system_prompt(language: str | None = None) -> str: - language_instruction = _build_language_instruction(language) - - return f""" -Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")} -{language_instruction} - -You are an expert research assistant specializing in structuring information. Your task is to create a detailed and logical research outline based on the user's query. This outline will serve as the blueprint for generating a comprehensive research report. - - -- user_query (string): The main question or topic the user wants researched. This guides the entire outline creation process. -- num_sections (integer): The target number of distinct sections the final research report should have. This helps control the granularity and structure of the outline. 
- - - -A JSON object with the following structure: -{{ - "answer_outline": [ - {{ - "section_id": 0, - "section_title": "Section Title", - "questions": [ - "Question 1 to research for this section", - "Question 2 to research for this section" - ] - }} - ] -}} - - - -1. **Deconstruct the `user_query`:** Identify the key concepts, entities, and the core information requested by the user. -2. **Determine Section Themes:** Based on the analysis and the requested `num_sections`, divide the topic into distinct, logical themes or sub-topics. Each theme will become a section. Ensure these themes collectively address the `user_query` comprehensively. -3. **Develop Sections:** For *each* of the `num_sections`: - * **Assign `section_id`:** Start with 0 and increment sequentially for each section. - * **Craft `section_title`:** Write a concise, descriptive title that clearly defines the scope and focus of the section's theme. - * **Formulate Research `questions`:** Generate 2 to 5 specific, targeted research questions for this section. These questions must: - * Directly relate to the `section_title` and explore its key aspects. - * Be answerable through focused research (e.g., searching documents, databases, or knowledge bases). - * Be distinct from each other and from questions in other sections. Avoid redundancy. - * Collectively guide the gathering of information needed to fully address the section's theme. -4. **Ensure Logical Flow:** Arrange the sections in a coherent and intuitive sequence. Consider structures like: - * General background -> Specific details -> Analysis/Comparison -> Applications/Implications - * Problem definition -> Proposed solutions -> Evaluation -> Conclusion - * Chronological progression -5. **Verify Completeness and Cohesion:** Review the entire outline (`section_titles` and `questions`) to confirm that: - * All sections together provide a complete and well-structured answer to the original `user_query`. - * There are no significant overlaps or gaps in coverage between sections. -6. **Adhere Strictly to Output Format:** Ensure the final output is a valid JSON object matching the specified structure exactly, including correct field names (`answer_outline`, `section_id`, `section_title`, `questions`) and data types. - - - -User Query: "What are the health benefits of meditation?" -Number of Sections: 3 - -{{ - "answer_outline": [ - {{ - "section_id": 0, - "section_title": "Physical Health Benefits of Meditation", - "questions": [ - "What physiological changes occur in the body during meditation?", - "How does regular meditation affect blood pressure and heart health?", - "What impact does meditation have on inflammation and immune function?", - "Can meditation help with pain management, and if so, how?" - ] - }}, - {{ - "section_id": 1, - "section_title": "Mental Health Benefits of Meditation", - "questions": [ - "How does meditation affect stress and anxiety levels?", - "What changes in brain structure or function have been observed in meditation practitioners?", - "Can meditation help with depression and mood disorders?", - "What is the relationship between meditation and cognitive function?" 
- ] - }}, - {{ - "section_id": 2, - "section_title": "Best Meditation Practices for Maximum Benefits", - "questions": [ - "What are the most effective meditation techniques for beginners?", - "How long and how frequently should one meditate to see benefits?", - "Are there specific meditation approaches best suited for particular health goals?", - "What common obstacles prevent people from experiencing meditation benefits?" - ] - }} - ] -}} - - -""" - - def get_further_questions_system_prompt(): return f""" Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")} diff --git a/surfsense_backend/app/agents/researcher/state.py b/surfsense_backend/app/agents/researcher/state.py index 0e10dfac..90f7039b 100644 --- a/surfsense_backend/app/agents/researcher/state.py +++ b/surfsense_backend/app/agents/researcher/state.py @@ -28,8 +28,6 @@ class State: chat_history: list[Any] | None = field(default_factory=list) reformulated_query: str | None = field(default=None) - # Using field to explicitly mark as part of state - answer_outline: Any | None = field(default=None) further_questions: Any | None = field(default=None) # Temporary field to hold reranked documents from sub-agents for further question generation diff --git a/surfsense_backend/app/agents/researcher/sub_section_writer/__init__.py b/surfsense_backend/app/agents/researcher/sub_section_writer/__init__.py deleted file mode 100644 index 8459b297..00000000 --- a/surfsense_backend/app/agents/researcher/sub_section_writer/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""New LangGraph Agent. - -This module defines a custom graph. -""" - -from .graph import graph - -__all__ = ["graph"] diff --git a/surfsense_backend/app/agents/researcher/sub_section_writer/configuration.py b/surfsense_backend/app/agents/researcher/sub_section_writer/configuration.py deleted file mode 100644 index 29cbf45d..00000000 --- a/surfsense_backend/app/agents/researcher/sub_section_writer/configuration.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Define the configurable parameters for the agent.""" - -from __future__ import annotations - -from dataclasses import dataclass, fields -from enum import Enum -from typing import Any - -from langchain_core.runnables import RunnableConfig - - -class SubSectionType(Enum): - """Enum defining the type of sub-section.""" - - START = "START" - MIDDLE = "MIDDLE" - END = "END" - - -@dataclass(kw_only=True) -class Configuration: - """The configuration for the agent.""" - - # Input parameters provided at invocation - sub_section_title: str - sub_section_questions: list[str] - sub_section_type: SubSectionType - user_query: str - relevant_documents: list[Any] # Documents provided directly to the agent - user_id: str - search_space_id: int - - @classmethod - def from_runnable_config( - cls, config: RunnableConfig | None = None - ) -> Configuration: - """Create a Configuration instance from a RunnableConfig object.""" - configurable = (config.get("configurable") or {}) if config else {} - _fields = {f.name for f in fields(cls) if f.init} - return cls(**{k: v for k, v in configurable.items() if k in _fields}) diff --git a/surfsense_backend/app/agents/researcher/sub_section_writer/graph.py b/surfsense_backend/app/agents/researcher/sub_section_writer/graph.py deleted file mode 100644 index 35ebc4e0..00000000 --- a/surfsense_backend/app/agents/researcher/sub_section_writer/graph.py +++ /dev/null @@ -1,21 +0,0 @@ -from langgraph.graph import StateGraph - -from .configuration import Configuration -from .nodes import rerank_documents, write_sub_section -from 
.state import State - -# Define a new graph -workflow = StateGraph(State, config_schema=Configuration) - -# Add the nodes to the graph -workflow.add_node("rerank_documents", rerank_documents) -workflow.add_node("write_sub_section", write_sub_section) - -# Connect the nodes -workflow.add_edge("__start__", "rerank_documents") -workflow.add_edge("rerank_documents", "write_sub_section") -workflow.add_edge("write_sub_section", "__end__") - -# Compile the workflow into an executable graph -graph = workflow.compile() -graph.name = "Sub Section Writer" # This defines the custom name in LangSmith diff --git a/surfsense_backend/app/agents/researcher/sub_section_writer/nodes.py b/surfsense_backend/app/agents/researcher/sub_section_writer/nodes.py deleted file mode 100644 index 153cafac..00000000 --- a/surfsense_backend/app/agents/researcher/sub_section_writer/nodes.py +++ /dev/null @@ -1,241 +0,0 @@ -from typing import Any - -from langchain_core.messages import HumanMessage, SystemMessage -from langchain_core.runnables import RunnableConfig - -from app.services.reranker_service import RerankerService - -from ..utils import ( - calculate_token_count, - format_documents_section, - langchain_chat_history_to_str, - optimize_documents_for_token_limit, -) -from .configuration import Configuration, SubSectionType -from .prompts import get_citation_system_prompt, get_no_documents_system_prompt -from .state import State - - -async def rerank_documents(state: State, config: RunnableConfig) -> dict[str, Any]: - """ - Rerank the documents based on relevance to the sub-section title. - - This node takes the relevant documents provided in the configuration, - reranks them using the reranker service based on the sub-section title, - and updates the state with the reranked documents. - - Returns: - Dict containing the reranked documents. 
- """ - # Get configuration and relevant documents - configuration = Configuration.from_runnable_config(config) - documents = configuration.relevant_documents - sub_section_questions = configuration.sub_section_questions - - # If no documents were provided, return empty list - if not documents or len(documents) == 0: - return {"reranked_documents": []} - - # Get reranker service from app config - reranker_service = RerankerService.get_reranker_instance() - - # Use documents as is if no reranker service is available - reranked_docs = documents - - if reranker_service: - try: - # Use the sub-section questions for reranking context - # rerank_query = "\n".join(sub_section_questions) - # rerank_query = configuration.user_query - - rerank_query = ( - configuration.user_query + "\n" + "\n".join(sub_section_questions) - ) - - # Convert documents to format expected by reranker if needed - reranker_input_docs = [ - { - "chunk_id": doc.get("chunk_id", f"chunk_{i}"), - "content": doc.get("content", ""), - "score": doc.get("score", 0.0), - "document": { - "id": doc.get("document", {}).get("id", ""), - "title": doc.get("document", {}).get("title", ""), - "document_type": doc.get("document", {}).get( - "document_type", "" - ), - "metadata": doc.get("document", {}).get("metadata", {}), - }, - } - for i, doc in enumerate(documents) - ] - - # Rerank documents using the section title - reranked_docs = reranker_service.rerank_documents( - rerank_query, reranker_input_docs - ) - - # Sort by score in descending order - reranked_docs.sort(key=lambda x: x.get("score", 0), reverse=True) - - print( - f"Reranked {len(reranked_docs)} documents for section: {configuration.sub_section_title}" - ) - except Exception as e: - print(f"Error during reranking: {e!s}") - # Use original docs if reranking fails - - return {"reranked_documents": reranked_docs} - - -async def write_sub_section(state: State, config: RunnableConfig) -> dict[str, Any]: - """ - Write the sub-section using the provided documents. - - This node takes the relevant documents provided in the configuration and uses - an LLM to generate a comprehensive answer to the sub-section title with - proper citations. The citations follow [citation:source_id] format using source IDs from the - documents. If no documents are provided, it will use chat history to generate - content. - - Returns: - Dict containing the final answer in the "final_answer" key. 
- """ - from app.services.llm_service import get_user_fast_llm - - # Get configuration and relevant documents from configuration - configuration = Configuration.from_runnable_config(config) - documents = state.reranked_documents - user_id = configuration.user_id - search_space_id = configuration.search_space_id - - # Get user's fast LLM - llm = await get_user_fast_llm(state.db_session, user_id, search_space_id) - if not llm: - error_message = f"No fast LLM configured for user {user_id} in search space {search_space_id}" - print(error_message) - raise RuntimeError(error_message) - - # Extract configuration data - section_title = configuration.sub_section_title - sub_section_questions = configuration.sub_section_questions - user_query = configuration.user_query - sub_section_type = configuration.sub_section_type - - # Format the questions as bullet points for clarity - questions_text = "\n".join([f"- {question}" for question in sub_section_questions]) - - # Provide context based on the subsection type - section_position_context_map = { - SubSectionType.START: "This is the INTRODUCTION section.", - SubSectionType.MIDDLE: "This is a MIDDLE section. Ensure this content flows naturally from previous sections and into subsequent ones. This could be any middle section in the document, so maintain coherence with the overall structure while addressing the specific topic of this section. Do not provide any conclusions in this section, as conclusions should only appear in the final section.", - SubSectionType.END: "This is the CONCLUSION section. Focus on summarizing key points, providing closure.", - } - section_position_context = section_position_context_map.get(sub_section_type, "") - - # Determine if we have documents and optimize for token limits - has_documents_initially = documents and len(documents) > 0 - - chat_history_str = langchain_chat_history_to_str(state.chat_history) - - if has_documents_initially: - # Create base message template for token calculation (without documents) - base_human_message_template = f""" - - Now user's query is: - - {user_query} - - - The sub-section title is: - - {section_title} - - - - {section_position_context} - - - - {questions_text} - - - Please write content for this sub-section using the provided source material and cite all information appropriately. - """ - - # Use initial system prompt for token calculation - initial_system_prompt = get_citation_system_prompt(chat_history_str) - base_messages = [ - SystemMessage(content=initial_system_prompt), - HumanMessage(content=base_human_message_template), - ] - - # Optimize documents to fit within token limits - optimized_documents, has_optimized_documents = ( - optimize_documents_for_token_limit(documents, base_messages, llm.model) - ) - - # Update state based on optimization result - documents = optimized_documents - has_documents = has_optimized_documents - else: - has_documents = False - - # Choose system prompt based on final document availability - system_prompt = ( - get_citation_system_prompt(chat_history_str) - if has_documents - else get_no_documents_system_prompt(chat_history_str) - ) - - # Generate documents section - documents_text = ( - format_documents_section(documents, "Source material") if has_documents else "" - ) - - # Create final human message content - instruction_text = ( - "Please write content for this sub-section using the provided source material and cite all information appropriately." 
- if has_documents - else "Please write content for this sub-section based on our conversation history and your general knowledge." - ) - - human_message_content = f""" - {documents_text} - - Now user's query is: - - {user_query} - - - The sub-section title is: - - {section_title} - - - - {section_position_context} - - - - {questions_text} - - - {instruction_text} - """ - - # Create final messages for the LLM - messages_with_chat_history = [ - SystemMessage(content=system_prompt), - HumanMessage(content=human_message_content), - ] - - # Log final token count - total_tokens = calculate_token_count(messages_with_chat_history, llm.model) - print(f"Final token count: {total_tokens}") - - # Call the LLM and get the response - response = await llm.ainvoke(messages_with_chat_history) - final_answer = response.content - - return {"final_answer": final_answer} diff --git a/surfsense_backend/app/agents/researcher/sub_section_writer/prompts.py b/surfsense_backend/app/agents/researcher/sub_section_writer/prompts.py deleted file mode 100644 index a0134bf7..00000000 --- a/surfsense_backend/app/agents/researcher/sub_section_writer/prompts.py +++ /dev/null @@ -1,239 +0,0 @@ -import datetime - -from ..prompts import _build_language_instruction - - -def get_citation_system_prompt( - chat_history: str | None = None, language: str | None = None -): - chat_history_section = ( - f""" - -{chat_history if chat_history else "NO CHAT HISTORY PROVIDED"} - -""" - if chat_history is not None - else """ - -NO CHAT HISTORY PROVIDED - -""" - ) - - # Add language instruction if specified - language_instruction = _build_language_instruction(language) - - return f""" -Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")} -You are SurfSense, an advanced AI research assistant that synthesizes information from multiple knowledge sources to provide comprehensive, well-cited answers to user queries.{language_instruction} -{chat_history_section} - -- EXTENSION: "Web content saved via SurfSense browser extension" (personal browsing history) -- CRAWLED_URL: "Webpages indexed by SurfSense web crawler" (personally selected websites) -- FILE: "User-uploaded documents (PDFs, Word, etc.)" (personal files) -- SLACK_CONNECTOR: "Slack conversations and shared content" (personal workspace communications) -- NOTION_CONNECTOR: "Notion workspace pages and databases" (personal knowledge management) -- YOUTUBE_VIDEO: "YouTube video transcripts and metadata" (personally saved videos) -- GITHUB_CONNECTOR: "GitHub repository content and issues" (personal repositories and interactions) -- ELASTICSEARCH_CONNECTOR: "Elasticsearch documents and indices (indexed content from your ES connector)" (personal search index) -- LINEAR_CONNECTOR: "Linear project issues and discussions" (personal project management) -- JIRA_CONNECTOR: "Jira project issues, tickets, and comments" (personal project tracking) -- CONFLUENCE_CONNECTOR: "Confluence pages and comments" (personal project documentation) -- CLICKUP_CONNECTOR: "ClickUp tasks and project data" (personal task management) -- GOOGLE_CALENDAR_CONNECTOR: "Google Calendar events, meetings, and schedules" (personal calendar and time management) -- GOOGLE_GMAIL_CONNECTOR: "Google Gmail emails and conversations" (personal emails and communications) -- DISCORD_CONNECTOR: "Discord server messages and channels" (personal community interactions) -- AIRTABLE_CONNECTOR: "Airtable records, tables, and database content" (personal data management and organization) -- TAVILY_API: "Tavily search API results" 
(personalized search results) -- LINKUP_API: "Linkup search API results" (personalized search results) -- LUMA_CONNECTOR: "Luma events" - - -1. Review the chat history to understand the conversation context and any previous topics discussed. -2. Carefully analyze all provided documents in the section's. -3. Extract relevant information that addresses the user's query. -4. Synthesize a comprehensive, personalized answer using information from the user's personal knowledge sources. -5. For EVERY piece of information you include from the documents, add a citation in the format [citation:knowledge_source_id] where knowledge_source_id is the source_id from the document's metadata. -6. Make sure ALL factual statements from the documents have proper citations. -7. If multiple documents support the same point, include all relevant citations [citation:source_id1], [citation:source_id2]. -8. Present information in a logical, coherent flow that reflects the user's personal context. -9. Use your own words to connect ideas, but cite ALL information from the documents. -10. If documents contain conflicting information, acknowledge this and present both perspectives with appropriate citations. -11. Do not make up or include information not found in the provided documents. -12. Use the chat history to maintain conversation continuity and refer to previous discussions when relevant. -13. CRITICAL: You MUST use the exact source_id value from each document's metadata for citations. Do not create your own citation numbers. -14. CRITICAL: Every citation MUST be in the format [citation:knowledge_source_id] where knowledge_source_id is the exact source_id value. -15. CRITICAL: Never modify or change the source_id - always use the original values exactly as provided in the metadata. -16. CRITICAL: Do not return citations as clickable links. -17. CRITICAL: Never format citations as markdown links like "([citation:5](https://example.com))". Always use plain square brackets only. -18. CRITICAL: Citations must ONLY appear as [citation:source_id] or [citation:source_id1], [citation:source_id2] format - never with parentheses, hyperlinks, or other formatting. -19. CRITICAL: Never make up source IDs. Only use source_id values that are explicitly provided in the document metadata. -20. CRITICAL: If you are unsure about a source_id, do not include a citation rather than guessing or making one up. -21. CRITICAL: Focus only on answering the user's query. Any guiding questions provided are for your thinking process only and should not be mentioned in your response. -22. CRITICAL: Ensure your response aligns with the provided sub-section title and section position. -23. CRITICAL: Remember that all knowledge sources contain personal information - provide answers that reflect this personal context. - - - -- Write in clear, professional language suitable for academic or technical audiences -- Tailor your response to the user's personal context based on their knowledge sources -- Organize your response with appropriate paragraphs, headings, and structure -- Every fact from the documents must have a citation in the format [citation:knowledge_source_id] where knowledge_source_id is the EXACT source_id from the document's metadata -- Citations should appear at the end of the sentence containing the information they support -- Multiple citations should be separated by commas: [citation:source_id1], [citation:source_id2], [citation:source_id3] -- No need to return references section. Just citations in answer. 
-- NEVER create your own citation format - use the exact source_id values from the documents in the [citation:source_id] format. -- NEVER format citations as clickable links or as markdown links like "([citation:5](https://example.com))". Always use plain square brackets only. -- NEVER make up source IDs if you are unsure about the source_id. It is better to omit the citation than to guess. -- NEVER include or mention the guiding questions in your response. They are only to help guide your thinking. -- ALWAYS focus on answering the user's query directly from the information in the documents. -- ALWAYS provide personalized answers that reflect the user's own knowledge and context. - - - - - - - 1 - EXTENSION - - - The Great Barrier Reef is the world's largest coral reef system, stretching over 2,300 kilometers along the coast of Queensland, Australia. It comprises over 2,900 individual reefs and 900 islands. - - - - - - 13 - YOUTUBE_VIDEO - - - Climate change poses a significant threat to coral reefs worldwide. Rising ocean temperatures have led to mass coral bleaching events in the Great Barrier Reef in 2016, 2017, and 2020. - - - - - - 21 - CRAWLED_URL - - - The Great Barrier Reef was designated a UNESCO World Heritage Site in 1981 due to its outstanding universal value and biological diversity. It is home to over 1,500 species of fish and 400 types of coral. - - - - - - - Based on your saved browser content and videos, the Great Barrier Reef is the world's largest coral reef system, stretching over 2,300 kilometers along the coast of Queensland, Australia [citation:1]. From your browsing history, you've looked into its designation as a UNESCO World Heritage Site in 1981 due to its outstanding universal value and biological diversity [citation:21]. The reef is home to over 1,500 species of fish and 400 types of coral [citation:21]. According to a YouTube video you've watched, climate change poses a significant threat to coral reefs worldwide, with rising ocean temperatures leading to mass coral bleaching events in the Great Barrier Reef in 2016, 2017, and 2020 [citation:13]. The reef system comprises over 2,900 individual reefs and 900 islands [citation:1], making it an ecological treasure that requires protection from multiple threats [citation:1], [citation:13]. - - - -DO NOT use any of these incorrect citation formats: -- Using parentheses and markdown links: ([citation:1](https://github.com/MODSetter/SurfSense)) -- Using parentheses around brackets: ([citation:1]) -- Using hyperlinked text: [link to source 1](https://example.com) -- Using footnote style: ... reef system¹ -- Making up source IDs when source_id is unknown -- Using old IEEE format: [1], [2], [3] -- Using source types instead of IDs: [citation:EXTENSION] instead of [citation:1] - - - -ONLY use the format [citation:source_id] or multiple citations [citation:source_id1], [citation:source_id2], [citation:source_id3] -Note that the citations use the exact source_id values (1, 13, and 21) from the document metadata. Citations appear at the end of sentences and maintain the new citation format. - - -When you see a user query like: - - Give all linear issues. - - -Focus exclusively on answering this query using information from the provided documents, which contain the user's personal knowledge and data. - -If guiding questions are provided in a section, use them only to guide your thinking process. Do not mention or list these questions in your response. - -Make sure your response: -1. 
Considers the chat history for context and conversation continuity -2. Directly answers the user's query with personalized information from their own knowledge sources -3. Fits the provided sub-section title and section position -4. Uses proper citations for all information from documents -5. Is well-structured and professional in tone -6. Acknowledges the personal nature of the information being provided - -""" - - -def get_no_documents_system_prompt( - chat_history: str | None = None, language: str | None = None -): - chat_history_section = ( - f""" - -{chat_history if chat_history else "NO CHAT HISTORY PROVIDED"} - -""" - if chat_history is not None - else """ - -NO CHAT HISTORY PROVIDED - -""" - ) - - # Add language instruction if specified - language_instruction = _build_language_instruction(language) - - return f""" -Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")} -You are SurfSense, an advanced AI research assistant that helps users create well-structured content for their documents and research.{language_instruction} -{chat_history_section} - -You are writing content for a specific sub-section of a document. No specific documents from the user's personal knowledge base are available, so you should create content based on: -1. The conversation history and context -2. Your general knowledge and expertise -3. The specific sub-section requirements provided -4. Understanding of the user's needs based on our conversation - - - -1. Write comprehensive, well-structured content for the specified sub-section -2. Draw upon the conversation history to understand the user's context and needs -3. Use your general knowledge to provide accurate, detailed information -4. Ensure the content fits the sub-section title and position in the document -5. Follow the section positioning guidelines (introduction, middle, or conclusion) -6. Structure the content logically with appropriate flow and transitions -7. Write in a professional, academic tone suitable for research documents -8. Acknowledge when you're drawing from general knowledge rather than personal sources -9. If the content would benefit from personalized information, gently mention that adding relevant sources to SurfSense could enhance the content -10. Ensure the content addresses the guiding questions without explicitly mentioning them -11. Create content that flows naturally and maintains coherence with the overall document structure - - - -- Write in clear, professional language suitable for academic or research documents -- Organize content with appropriate paragraphs and logical structure -- No citations are needed since you're using general knowledge -- Follow the specified section type (START/MIDDLE/END) guidelines -- Ensure content flows naturally and maintains document coherence -- Be comprehensive and detailed while staying focused on the sub-section topic -- When appropriate, mention that adding relevant sources to SurfSense could provide more personalized and cited content - - - -- START (Introduction): Provide context, background, and introduce key concepts -- MIDDLE: Develop main points, provide detailed analysis, ensure smooth transitions -- END (Conclusion): Summarize key points, provide closure, synthesize main insights - - - -When writing content for a sub-section without access to personal documents: -1. Review the chat history to understand conversation context and maintain continuity -2. Create the most comprehensive and useful content possible using general knowledge -3. 
Ensure the content fits the sub-section title and document position -4. Draw upon conversation history for context about the user's needs -5. Write in a professional, research-appropriate tone -6. Address the guiding questions through natural content flow without explicitly listing them -7. Suggest how adding relevant sources to SurfSense could enhance future content when appropriate - -""" diff --git a/surfsense_backend/app/agents/researcher/sub_section_writer/state.py b/surfsense_backend/app/agents/researcher/sub_section_writer/state.py deleted file mode 100644 index 6fb5434b..00000000 --- a/surfsense_backend/app/agents/researcher/sub_section_writer/state.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Define the state structures for the agent.""" - -from __future__ import annotations - -from dataclasses import dataclass, field -from typing import Any - -from sqlalchemy.ext.asyncio import AsyncSession - - -@dataclass -class State: - """Defines the dynamic state for the agent during execution. - - This state tracks the database session and the outputs generated by the agent's nodes. - See: https://langchain-ai.github.io/langgraph/concepts/low_level/#state - for more information. - """ - - # Runtime context - db_session: AsyncSession - - chat_history: list[Any] | None = field(default_factory=list) - # OUTPUT: Populated by agent nodes - reranked_documents: list[Any] | None = None - final_answer: str | None = None diff --git a/surfsense_backend/app/agents/researcher/utils.py b/surfsense_backend/app/agents/researcher/utils.py index 01d9f8f9..a2c211f2 100644 --- a/surfsense_backend/app/agents/researcher/utils.py +++ b/surfsense_backend/app/agents/researcher/utils.py @@ -3,25 +3,6 @@ from langchain.schema import AIMessage, HumanMessage, SystemMessage from langchain_core.messages import BaseMessage from litellm import get_model_info, token_counter -from pydantic import BaseModel, Field - - -class Section(BaseModel): - """A section in the answer outline.""" - - section_id: int = Field(..., description="The zero-based index of the section") - section_title: str = Field(..., description="The title of the section") - questions: list[str] = Field( - ..., description="Questions to research for this section" - ) - - -class AnswerOutline(BaseModel): - """The complete answer outline with all sections.""" - - answer_outline: list[Section] = Field( - ..., description="List of sections in the answer outline" - ) class DocumentTokenInfo(NamedTuple): diff --git a/surfsense_backend/app/db.py b/surfsense_backend/app/db.py index 5183adc9..241be3fc 100644 --- a/surfsense_backend/app/db.py +++ b/surfsense_backend/app/db.py @@ -76,9 +76,6 @@ class SearchSourceConnectorType(str, Enum): class ChatType(str, Enum): QNA = "QNA" - REPORT_GENERAL = "REPORT_GENERAL" - REPORT_DEEP = "REPORT_DEEP" - REPORT_DEEPER = "REPORT_DEEPER" class LiteLLMProvider(str, Enum): diff --git a/surfsense_backend/app/tasks/stream_connector_search_results.py b/surfsense_backend/app/tasks/stream_connector_search_results.py index dd1ae4ce..1676c67f 100644 --- a/surfsense_backend/app/tasks/stream_connector_search_results.py +++ b/surfsense_backend/app/tasks/stream_connector_search_results.py @@ -38,16 +38,6 @@ async def stream_connector_search_results( """ streaming_service = StreamingService() - if research_mode == "REPORT_GENERAL": - num_sections = 1 - elif research_mode == "REPORT_DEEP": - num_sections = 3 - elif research_mode == "REPORT_DEEPER": - num_sections = 6 - else: - # Default fallback - num_sections = 1 - # Convert UUID to string if needed 
user_id_str = str(user_id) if isinstance(user_id, UUID) else user_id @@ -60,12 +50,10 @@ async def stream_connector_search_results( config = { "configurable": { "user_query": user_query, - "num_sections": num_sections, "connectors_to_search": selected_connectors, "user_id": user_id_str, "search_space_id": search_space_id, "search_mode": search_mode, - "research_mode": research_mode, "document_ids_to_add_in_context": document_ids_to_add_in_context, "language": language, # Add language to the configuration } diff --git a/surfsense_backend/app/utils/validators.py b/surfsense_backend/app/utils/validators.py index 53ba187d..c64ae967 100644 --- a/surfsense_backend/app/utils/validators.py +++ b/surfsense_backend/app/utils/validators.py @@ -201,7 +201,7 @@ def validate_research_mode(research_mode: Any) -> str: if not normalized_mode: raise HTTPException(status_code=400, detail="research_mode cannot be empty") - valid_modes = ["REPORT_GENERAL", "REPORT_DEEP", "REPORT_DEEPER", "QNA"] + valid_modes = ["QNA"] if normalized_mode not in valid_modes: raise HTTPException( status_code=400, diff --git a/surfsense_web/README.md b/surfsense_web/README.md deleted file mode 100644 index 244b92bd..00000000 --- a/surfsense_web/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# Next.js Token Handler Component - -This project includes a reusable client component for Next.js that handles token storage from URL parameters. - -## TokenHandler Component - -The `TokenHandler` component is designed to: - -1. Extract a token from URL parameters -2. Store the token in localStorage -3. Redirect the user to a specified path - -### Usage - -```tsx -import TokenHandler from '@/components/TokenHandler'; - -export default function AuthCallbackPage() { - return ( -
-    <div>
-      <h1>Authentication Callback</h1>
-      <TokenHandler />
-    </div>
- ); -} -``` - -### Props - -The component accepts the following props: - -- `redirectPath` (optional): Path to redirect after storing token (default: '/') -- `tokenParamName` (optional): Name of the URL parameter containing the token (default: 'token') -- `storageKey` (optional): Key to use when storing in localStorage (default: 'auth_token') - -### Example URL - -After authentication, redirect users to: -``` -https://your-domain.com/auth/callback?token=your-auth-token -``` - -## Implementation Details - -- Uses Next.js's `useSearchParams` hook to access URL parameters -- Uses `useRouter` for client-side navigation after token storage -- Includes error handling for localStorage operations -- Displays a loading message while processing - -## Security Considerations - -- This implementation assumes the token is passed securely -- Consider using HTTPS to prevent token interception -- For enhanced security, consider using HTTP-only cookies instead of localStorage -- The token in the URL might be visible in browser history and server logs - -This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app). - -## Getting Started - -First, run the development server: - -```bash -npm run dev -# or -yarn dev -# or -pnpm dev -# or -bun dev -``` - -Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. - -You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. - -This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel. - -## Learn More - -To learn more about Next.js, take a look at the following resources: - -- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. -- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. - -You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome! - -## Deploy on Vercel - -The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. - -Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details. diff --git a/surfsense_web/app/(home)/login/GoogleLoginButton.tsx b/surfsense_web/app/(home)/login/GoogleLoginButton.tsx index d1b5b1cc..3cb484fd 100644 --- a/surfsense_web/app/(home)/login/GoogleLoginButton.tsx +++ b/surfsense_web/app/(home)/login/GoogleLoginButton.tsx @@ -6,8 +6,8 @@ import { Logo } from "@/components/Logo"; import { AmbientBackground } from "./AmbientBackground"; export function GoogleLoginButton() { - const t = useTranslations('auth'); - + const t = useTranslations("auth"); + const handleGoogleLogin = () => { // Redirect to Google OAuth authorization URL fetch(`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/auth/google/authorize`) @@ -34,7 +34,7 @@ export function GoogleLoginButton() {

- {t('welcome_back')} + {t("welcome_back")}

- {t('cloud_dev_notice')}{" "} + {t("cloud_dev_notice")}{" "} - {t('docs')} + {t("docs")} {" "} - {t('cloud_dev_self_hosted')} + {t("cloud_dev_self_hosted")}

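The deleted `surfsense_web/README.md` above documents `TokenHandler` only by its contract: read the token from a URL parameter, persist it to localStorage, and redirect. A minimal sketch consistent with that contract, where the prop names and defaults come from the README while the markup and exact hook wiring are assumptions:

```tsx
"use client";

import { useRouter, useSearchParams } from "next/navigation";
import { useEffect } from "react";

interface TokenHandlerProps {
	redirectPath?: string;
	tokenParamName?: string;
	storageKey?: string;
}

export default function TokenHandler({
	redirectPath = "/",
	tokenParamName = "token",
	storageKey = "auth_token",
}: TokenHandlerProps) {
	const router = useRouter();
	const searchParams = useSearchParams();

	useEffect(() => {
		const token = searchParams.get(tokenParamName);
		if (!token) return;
		try {
			// localStorage can throw (private browsing, quota); the README calls for error handling here
			localStorage.setItem(storageKey, token);
		} catch (err) {
			console.error("Failed to store token:", err);
			return;
		}
		router.push(redirectPath);
	}, [searchParams, router, redirectPath, tokenParamName, storageKey]);

	return <p>Processing authentication...</p>;
}
```

Doing the storage and redirect inside `useEffect` keeps the render pure, which matches the README's note that a loading message is shown while the token is processed.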
@@ -94,7 +94,7 @@ export function GoogleLoginButton() {
- {t('continue_with_google')} + {t("continue_with_google")} diff --git a/surfsense_web/app/(home)/login/LocalLoginForm.tsx b/surfsense_web/app/(home)/login/LocalLoginForm.tsx index 93720cfd..cd5f43b4 100644 --- a/surfsense_web/app/(home)/login/LocalLoginForm.tsx +++ b/surfsense_web/app/(home)/login/LocalLoginForm.tsx @@ -3,14 +3,14 @@ import { Eye, EyeOff } from "lucide-react"; import { AnimatePresence, motion } from "motion/react"; import Link from "next/link"; import { useRouter } from "next/navigation"; +import { useTranslations } from "next-intl"; import { useEffect, useState } from "react"; import { toast } from "sonner"; -import { useTranslations } from "next-intl"; import { getAuthErrorDetails, isNetworkError, shouldRetry } from "@/lib/auth-errors"; export function LocalLoginForm() { - const t = useTranslations('auth'); - const tCommon = useTranslations('common'); + const t = useTranslations("auth"); + const tCommon = useTranslations("common"); const [username, setUsername] = useState(""); const [password, setPassword] = useState(""); const [showPassword, setShowPassword] = useState(false); @@ -32,7 +32,7 @@ export function LocalLoginForm() { setErrorTitle(null); // Show loading toast - const loadingToast = toast.loading(tCommon('loading')); + const loadingToast = toast.loading(tCommon("loading")); try { // Create form data for the API request @@ -59,7 +59,7 @@ export function LocalLoginForm() { } // Success toast - toast.success(t('login_success'), { + toast.success(t("login_success"), { id: loadingToast, description: "Redirecting to dashboard...", duration: 2000, @@ -170,84 +170,84 @@ export function LocalLoginForm() { )} - - -
- - setUsername(e.target.value)} - className={`mt-1 block w-full rounded-md border px-3 py-2 shadow-sm focus:outline-none focus:ring-2 focus:ring-offset-2 dark:bg-gray-800 dark:text-white transition-colors ${ - error - ? "border-red-300 focus:border-red-500 focus:ring-red-500 dark:border-red-700" - : "border-gray-300 focus:border-blue-500 focus:ring-blue-500 dark:border-gray-700" - }`} - disabled={isLoading} - /> -
+ -
- -
+
+ setPassword(e.target.value)} - className={`mt-1 block w-full rounded-md border pr-10 px-3 py-2 shadow-sm focus:outline-none focus:ring-2 focus:ring-offset-2 dark:bg-gray-800 dark:text-white transition-colors ${ + value={username} + onChange={(e) => setUsername(e.target.value)} + className={`mt-1 block w-full rounded-md border px-3 py-2 shadow-sm focus:outline-none focus:ring-2 focus:ring-offset-2 dark:bg-gray-800 dark:text-white transition-colors ${ error ? "border-red-300 focus:border-red-500 focus:ring-red-500 dark:border-red-700" : "border-gray-300 focus:border-blue-500 focus:ring-blue-500 dark:border-gray-700" }`} disabled={isLoading} /> -
-
- - - - - {authType === "LOCAL" && ( -
-

- {t('dont_have_account')}{" "} - +

-
- )} + {t("password")} + +
+ setPassword(e.target.value)} + className={`mt-1 block w-full rounded-md border pr-10 px-3 py-2 shadow-sm focus:outline-none focus:ring-2 focus:ring-offset-2 dark:bg-gray-800 dark:text-white transition-colors ${ + error + ? "border-red-300 focus:border-red-500 focus:ring-red-500 dark:border-red-700" + : "border-gray-300 focus:border-blue-500 focus:ring-blue-500 dark:border-gray-700" + }`} + disabled={isLoading} + /> + +
+
+ + + + + {authType === "LOCAL" && ( +
+

+ {t("dont_have_account")}{" "} + + {t("sign_up")} + +

+
+ )} ); } diff --git a/surfsense_web/app/(home)/login/page.tsx b/surfsense_web/app/(home)/login/page.tsx index 8ce073d5..29455d38 100644 --- a/surfsense_web/app/(home)/login/page.tsx +++ b/surfsense_web/app/(home)/login/page.tsx @@ -3,9 +3,9 @@ import { Loader2 } from "lucide-react"; import { AnimatePresence, motion } from "motion/react"; import { useSearchParams } from "next/navigation"; +import { useTranslations } from "next-intl"; import { Suspense, useEffect, useState } from "react"; import { toast } from "sonner"; -import { useTranslations } from "next-intl"; import { Logo } from "@/components/Logo"; import { getAuthErrorDetails, shouldRetry } from "@/lib/auth-errors"; import { AmbientBackground } from "./AmbientBackground"; @@ -13,8 +13,8 @@ import { GoogleLoginButton } from "./GoogleLoginButton"; import { LocalLoginForm } from "./LocalLoginForm"; function LoginContent() { - const t = useTranslations('auth'); - const tCommon = useTranslations('common'); + const t = useTranslations("auth"); + const tCommon = useTranslations("common"); const [authType, setAuthType] = useState(null); const [isLoading, setIsLoading] = useState(true); const [urlError, setUrlError] = useState<{ title: string; message: string } | null>(null); @@ -29,15 +29,15 @@ function LoginContent() { // Show registration success message if (registered === "true") { - toast.success(t('register_success'), { - description: t('login_subtitle'), + toast.success(t("register_success"), { + description: t("login_subtitle"), duration: 5000, }); } // Show logout confirmation if (logout === "true") { - toast.success(tCommon('success'), { + toast.success(tCommon("success"), { description: "You have been securely logged out", duration: 3000, }); @@ -96,7 +96,7 @@ function LoginContent() {
- {tCommon('loading')} + {tCommon("loading")}
@@ -113,7 +113,7 @@ function LoginContent() {

- {t('sign_in')} + {t("sign_in")}

{/* URL Error Display */} diff --git a/surfsense_web/app/(home)/register/page.tsx b/surfsense_web/app/(home)/register/page.tsx index 701d0169..9e3b42e2 100644 --- a/surfsense_web/app/(home)/register/page.tsx +++ b/surfsense_web/app/(home)/register/page.tsx @@ -3,16 +3,16 @@ import { AnimatePresence, motion } from "motion/react"; import Link from "next/link"; import { useRouter } from "next/navigation"; +import { useTranslations } from "next-intl"; import { useEffect, useState } from "react"; import { toast } from "sonner"; -import { useTranslations } from "next-intl"; import { Logo } from "@/components/Logo"; import { getAuthErrorDetails, isNetworkError, shouldRetry } from "@/lib/auth-errors"; import { AmbientBackground } from "../login/AmbientBackground"; export default function RegisterPage() { - const t = useTranslations('auth'); - const tCommon = useTranslations('common'); + const t = useTranslations("auth"); + const tCommon = useTranslations("common"); const [email, setEmail] = useState(""); const [password, setPassword] = useState(""); const [confirmPassword, setConfirmPassword] = useState(""); @@ -34,10 +34,10 @@ export default function RegisterPage() { // Form validation if (password !== confirmPassword) { - setError(t('passwords_no_match')); - setErrorTitle(t('password_mismatch')); - toast.error(t('password_mismatch'), { - description: t('passwords_no_match_desc'), + setError(t("passwords_no_match")); + setErrorTitle(t("password_mismatch")); + toast.error(t("password_mismatch"), { + description: t("passwords_no_match_desc"), duration: 4000, }); return; @@ -48,7 +48,7 @@ export default function RegisterPage() { setErrorTitle(null); // Show loading toast - const loadingToast = toast.loading(t('creating_account')); + const loadingToast = toast.loading(t("creating_account")); try { const response = await fetch(`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/auth/register`, { @@ -86,9 +86,9 @@ export default function RegisterPage() { } // Success toast - toast.success(t('register_success'), { + toast.success(t("register_success"), { id: loadingToast, - description: t('redirecting_login'), + description: t("redirecting_login"), duration: 2000, }); @@ -123,7 +123,7 @@ export default function RegisterPage() { // Add retry action if the error is retryable if (shouldRetry(errorCode)) { toastOptions.action = { - label: tCommon('retry'), + label: tCommon("retry"), onClick: () => handleSubmit(e), }; } @@ -140,7 +140,7 @@ export default function RegisterPage() {

- {t('create_account')} + {t("create_account")}

@@ -212,7 +212,7 @@ export default function RegisterPage() { htmlFor="email" className="block text-sm font-medium text-gray-700 dark:text-gray-300" > - {t('email')} + {t("email")} - {t('password')} + {t("password")} - {t('confirm_password')} + {t("confirm_password")} - {isLoading ? t('creating_account_btn') : t('register')} + {isLoading ? t("creating_account_btn") : t("register")}

- {t('already_have_account')}{" "} + {t("already_have_account")}{" "} - {t('sign_in')} + {t("sign_in")}

diff --git a/surfsense_web/app/dashboard/[search_space_id]/client-layout.tsx b/surfsense_web/app/dashboard/[search_space_id]/client-layout.tsx index 11ef393a..55da177a 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/client-layout.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/client-layout.tsx @@ -2,17 +2,17 @@ import { Loader2 } from "lucide-react"; import { usePathname, useRouter } from "next/navigation"; -import type React from "react"; -import { useEffect, useState, useMemo } from "react"; import { useTranslations } from "next-intl"; +import type React from "react"; +import { useEffect, useMemo, useState } from "react"; import { DashboardBreadcrumb } from "@/components/dashboard-breadcrumb"; +import { LanguageSwitcher } from "@/components/LanguageSwitcher"; import { AppSidebarProvider } from "@/components/sidebar/AppSidebarProvider"; import { ThemeTogglerComponent } from "@/components/theme/theme-toggle"; import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"; import { Separator } from "@/components/ui/separator"; import { SidebarInset, SidebarProvider, SidebarTrigger } from "@/components/ui/sidebar"; import { useLLMPreferences } from "@/hooks/use-llm-configs"; -import { LanguageSwitcher } from "@/components/LanguageSwitcher"; export function DashboardClientLayout({ children, @@ -25,7 +25,7 @@ export function DashboardClientLayout({ navSecondary: any[]; navMain: any[]; }) { - const t = useTranslations('dashboard'); + const t = useTranslations("dashboard"); const router = useRouter(); const pathname = usePathname(); const searchSpaceIdNum = Number(searchSpaceId); @@ -37,14 +37,14 @@ export function DashboardClientLayout({ const isOnboardingPage = pathname?.includes("/onboard"); // Translate navigation items - const tNavMenu = useTranslations('nav_menu'); + const tNavMenu = useTranslations("nav_menu"); const translatedNavMain = useMemo(() => { return navMain.map((item) => ({ ...item, - title: tNavMenu(item.title.toLowerCase().replace(/ /g, '_')), + title: tNavMenu(item.title.toLowerCase().replace(/ /g, "_")), items: item.items?.map((subItem: any) => ({ ...subItem, - title: tNavMenu(subItem.title.toLowerCase().replace(/ /g, '_')), + title: tNavMenu(subItem.title.toLowerCase().replace(/ /g, "_")), })), })); }, [navMain, tNavMenu]); @@ -52,7 +52,7 @@ export function DashboardClientLayout({ const translatedNavSecondary = useMemo(() => { return navSecondary.map((item) => ({ ...item, - title: item.title === 'All Search Spaces' ? tNavMenu('all_search_spaces') : item.title, + title: item.title === "All Search Spaces" ? tNavMenu("all_search_spaces") : item.title, })); }, [navSecondary, tNavMenu]); @@ -98,8 +98,8 @@ export function DashboardClientLayout({
- {t('loading_config')} - {t('checking_llm_prefs')} + {t("loading_config")} + {t("checking_llm_prefs")} @@ -116,9 +116,9 @@ export function DashboardClientLayout({ - {t('config_error')} + {t("config_error")} - {t('failed_load_llm_config')} + {t("failed_load_llm_config")}

{error}

diff --git a/surfsense_web/app/dashboard/[search_space_id]/connectors/(manage)/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/connectors/(manage)/page.tsx index ffdb3247..f107ffa6 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/connectors/(manage)/page.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/connectors/(manage)/page.tsx @@ -12,9 +12,9 @@ import { } from "lucide-react"; import { motion } from "motion/react"; import { useParams, useRouter } from "next/navigation"; +import { useTranslations } from "next-intl"; import { useEffect, useState } from "react"; import { toast } from "sonner"; -import { useTranslations } from "next-intl"; import { AlertDialog, AlertDialogAction, @@ -63,12 +63,12 @@ import { useSearchSourceConnectors } from "@/hooks/use-search-source-connectors" import { cn } from "@/lib/utils"; export default function ConnectorsPage() { - const t = useTranslations('connectors'); - const tCommon = useTranslations('common'); - + const t = useTranslations("connectors"); + const tCommon = useTranslations("common"); + // Helper function to format date with time const formatDateTime = (dateString: string | null): string => { - if (!dateString) return t('never'); + if (!dateString) return t("never"); const date = new Date(dateString); return new Intl.DateTimeFormat("en-US", { @@ -107,7 +107,7 @@ export default function ConnectorsPage() { useEffect(() => { if (error) { - toast.error(t('failed_load')); + toast.error(t("failed_load")); console.error("Error fetching connectors:", error); } }, [error, t]); @@ -118,10 +118,10 @@ export default function ConnectorsPage() { try { await deleteConnector(connectorToDelete); - toast.success(t('delete_success')); + toast.success(t("delete_success")); } catch (error) { console.error("Error deleting connector:", error); - toast.error(t('delete_failed')); + toast.error(t("delete_failed")); } finally { setConnectorToDelete(null); } @@ -145,10 +145,10 @@ export default function ConnectorsPage() { const endDateStr = endDate ? format(endDate, "yyyy-MM-dd") : undefined; await indexConnector(selectedConnectorForIndexing, searchSpaceId, startDateStr, endDateStr); - toast.success(t('indexing_started')); + toast.success(t("indexing_started")); } catch (error) { console.error("Error indexing connector content:", error); - toast.error(error instanceof Error ? error.message : t('indexing_failed')); + toast.error(error instanceof Error ? error.message : t("indexing_failed")); } finally { setIndexingConnectorId(null); setSelectedConnectorForIndexing(null); @@ -162,10 +162,10 @@ export default function ConnectorsPage() { setIndexingConnectorId(connectorId); try { await indexConnector(connectorId, searchSpaceId); - toast.success(t('indexing_started')); + toast.success(t("indexing_started")); } catch (error) { console.error("Error indexing connector content:", error); - toast.error(error instanceof Error ? error.message : t('indexing_failed')); + toast.error(error instanceof Error ? error.message : t("indexing_failed")); } finally { setIndexingConnectorId(null); } @@ -258,21 +258,19 @@ export default function ConnectorsPage() { className="mb-8 flex items-center justify-between" >
-

{t('title')}

-

- {t('subtitle')} -

+

{t("title")}

+

{t("subtitle")}

- {t('your_connectors')} - {t('view_manage')} + {t("your_connectors")} + {t("view_manage")} {isLoading ? ( @@ -284,13 +282,11 @@ export default function ConnectorsPage() {
) : connectors.length === 0 ? (
-

{t('no_connectors')}

-

- {t('no_connectors_desc')} -

+

{t("no_connectors")}

+

{t("no_connectors_desc")}

) : ( @@ -298,11 +294,11 @@ export default function ConnectorsPage() { - {t('name')} - {t('type')} - {t('last_indexed')} - {t('periodic')} - {t('actions')} + {t("name")} + {t("type")} + {t("last_indexed")} + {t("periodic")} + {t("actions")} @@ -313,7 +309,7 @@ export default function ConnectorsPage() { {connector.is_indexable ? formatDateTime(connector.last_indexed_at) - : t('not_indexable')} + : t("not_indexable")} {connector.is_indexable ? ( @@ -368,11 +364,11 @@ export default function ConnectorsPage() { ) : ( )} - {t('index_date_range')} + {t("index_date_range")} -

{t('index_date_range')}

+

{t("index_date_range")}

@@ -390,11 +386,11 @@ export default function ConnectorsPage() { ) : ( )} - {t('quick_index')} + {t("quick_index")} -

{t('quick_index_auto')}

+

{t("quick_index_auto")}

@@ -429,7 +425,7 @@ export default function ConnectorsPage() { } > - {tCommon('edit')} + {tCommon("edit")} @@ -440,25 +436,25 @@ export default function ConnectorsPage() { onClick={() => setConnectorToDelete(connector.id)} > - {tCommon('delete')} + {tCommon("delete")} - {t('delete_connector')} + {t("delete_connector")} - {t('delete_confirm')} + {t("delete_confirm")} setConnectorToDelete(null)}> - {tCommon('cancel')} + {tCommon("cancel")} - {tCommon('delete')} + {tCommon("delete")} @@ -478,15 +474,13 @@ export default function ConnectorsPage() { - {t('select_date_range')} - - {t('select_date_range_desc')} - + {t("select_date_range")} + {t("select_date_range_desc")}
- + @@ -512,7 +506,7 @@ export default function ConnectorsPage() {
- + @@ -542,7 +536,7 @@ export default function ConnectorsPage() { setEndDate(undefined); }} > - {t('clear_dates')} + {t("clear_dates")}
@@ -580,9 +574,9 @@ export default function ConnectorsPage() { setEndDate(undefined); }} > - {tCommon('cancel')} + {tCommon("cancel")} - +
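The manage page above drives both indexing paths through one call: quick index passes only the connector and search space, while the date-range dialog serializes its bounds with date-fns first. A condensed sketch of that pattern, with the hook's signature inferred from the calls in this diff:

```ts
import { format } from "date-fns";

// Signature inferred from the calls above; the date bounds are optional "yyyy-MM-dd" strings.
declare function indexConnector(
	connectorId: number,
	searchSpaceId: string,
	startDate?: string,
	endDate?: string
): Promise<void>;

async function runIndexing(connectorId: number, searchSpaceId: string, start?: Date, end?: Date) {
	if (start && end) {
		// Date-range index: bounds picked in the dialog, serialized for the API
		await indexConnector(connectorId, searchSpaceId, format(start, "yyyy-MM-dd"), format(end, "yyyy-MM-dd"));
	} else {
		// Quick index: no bounds supplied, leaving the window to the backend (assumed behavior)
		await indexConnector(connectorId, searchSpaceId);
	}
}
```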
diff --git a/surfsense_web/app/dashboard/[search_space_id]/connectors/add/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/connectors/add/page.tsx index 4937da17..b92a9539 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/connectors/add/page.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/connectors/add/page.tsx @@ -9,8 +9,8 @@ import { import { AnimatePresence, motion, type Variants } from "motion/react"; import Link from "next/link"; import { useParams } from "next/navigation"; -import { useState } from "react"; import { useTranslations } from "next-intl"; +import { useState } from "react"; import { Badge } from "@/components/ui/badge"; import { Button } from "@/components/ui/button"; import { Card, CardContent, CardFooter, CardHeader } from "@/components/ui/card"; @@ -239,7 +239,7 @@ const cardVariants: Variants = { }; export default function ConnectorsPage() { - const t = useTranslations('add_connector'); + const t = useTranslations("add_connector"); const params = useParams(); const searchSpaceId = params.search_space_id as string; const [expandedCategories, setExpandedCategories] = useState([ @@ -268,11 +268,9 @@ export default function ConnectorsPage() { className="mb-12 text-center" >

- {t('title')} + {t("title")}

-

- {t('subtitle')} -

+

{t("subtitle")}

- {t('coming_soon')} + {t("coming_soon")} )} {connector.status === "connected" && ( @@ -351,7 +349,7 @@ export default function ConnectorsPage() { variant="outline" className="text-xs bg-green-100 dark:bg-green-950 text-green-800 dark:text-green-300 border-green-200 dark:border-green-800" > - {t('connected')} + {t("connected")} )} @@ -359,7 +357,9 @@ export default function ConnectorsPage() { -

{t(connector.description)}

+

+ {t(connector.description)} +

@@ -369,7 +369,7 @@ export default function ConnectorsPage() { className="w-full" > )} {connector.status === "connected" && ( @@ -395,7 +395,7 @@ export default function ConnectorsPage() { variant="outline" className="w-full border-green-500 text-green-600 hover:bg-green-50 dark:hover:bg-green-950" > - {t('manage')} + {t("manage")} )} diff --git a/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/components/DocumentsFilters.tsx b/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/components/DocumentsFilters.tsx index 4a4964ee..978cdf21 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/components/DocumentsFilters.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/components/DocumentsFilters.tsx @@ -2,8 +2,8 @@ import { CircleAlert, CircleX, Columns3, Filter, ListFilter, Trash } from "lucide-react"; import { AnimatePresence, motion, type Variants } from "motion/react"; -import React, { useMemo, useRef } from "react"; import { useTranslations } from "next-intl"; +import React, { useMemo, useRef } from "react"; import { AlertDialog, AlertDialogAction, @@ -56,7 +56,7 @@ export function DocumentsFilters({ columnVisibility: ColumnVisibility; onToggleColumn: (id: keyof ColumnVisibility, checked: boolean) => void; }) { - const t = useTranslations('documents'); + const t = useTranslations("documents"); const id = React.useId(); const inputRef = useRef(null); @@ -92,9 +92,9 @@ export function DocumentsFilters({ className="peer min-w-60 ps-9" value={searchValue} onChange={(e) => onSearch(e.target.value)} - placeholder={t('filter_placeholder')} + placeholder={t("filter_placeholder")} type="text" - aria-label={t('filter_placeholder')} + aria-label={t("filter_placeholder")} /> void; }) { - const t = useTranslations('documents'); + const t = useTranslations("documents"); const sorted = React.useMemo( () => sortDocuments(documents, sortKey, sortDesc), [documents, sortKey, sortDesc] @@ -103,15 +103,15 @@ export function DocumentsTableShell({
-

{t('loading')}

+

{t("loading")}

) : error ? (
-

{t('error_loading')}

+

{t("error_loading")}

@@ -119,7 +119,7 @@ export function DocumentsTableShell({
-

{t('no_documents')}

+

{t("no_documents")}

) : ( @@ -142,7 +142,7 @@ export function DocumentsTableShell({ className="flex h-full w-full cursor-pointer select-none items-center justify-between gap-2" onClick={() => onSortHeader("title")} > - {t('title')} + {t("title")} {sortKey === "title" ? ( sortDesc ? ( @@ -160,7 +160,7 @@ export function DocumentsTableShell({ className="flex h-full w-full cursor-pointer select-none items-center justify-between gap-2" onClick={() => onSortHeader("document_type")} > - {t('type')} + {t("type")} {sortKey === "document_type" ? ( sortDesc ? ( @@ -172,7 +172,7 @@ export function DocumentsTableShell({ )} {columnVisibility.content && ( - {t('content_summary')} + {t("content_summary")} )} {columnVisibility.created_at && ( @@ -266,7 +266,7 @@ export function DocumentsTableShell({ content={doc.content} trigger={ } /> @@ -337,7 +337,7 @@ export function DocumentsTableShell({ size="sm" className="w-fit text-xs p-0 h-auto" > - {t('view_full')} + {t("view_full")} } /> diff --git a/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/components/PaginationControls.tsx b/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/components/PaginationControls.tsx index b53b66f4..d87fa2dc 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/components/PaginationControls.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/components/PaginationControls.tsx @@ -39,7 +39,7 @@ export function PaginationControls({ canNext: boolean; id: string; }) { - const t = useTranslations('documents'); + const t = useTranslations("documents"); const start = total === 0 ? 0 : pageIndex * pageSize + 1; const end = Math.min((pageIndex + 1) * pageSize, total); @@ -52,7 +52,7 @@ export function PaginationControls({ transition={{ type: "spring", stiffness: 300, damping: 30 }} >
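`PaginationControls` above derives its "start-end of total" label from zero-based page math. A quick worked check of the formulas in the diff:

```ts
const pageIndex = 2; // zero-based, i.e. the third page
const pageSize = 20;
const total = 45;

// Same expressions as in PaginationControls
const start = total === 0 ? 0 : pageIndex * pageSize + 1; // 41
const end = Math.min((pageIndex + 1) * pageSize, total); // 45, clamped on the final partial page

console.log(`${start}-${end} of ${total}`); // "41-45 of 45"
```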
@@ -1023,7 +1027,7 @@ function LogsPagination({ table, id, t }: { table: any; id: string; t: (key: str animate={{ opacity: 1, x: 0 }} > - - - - {languages.find(lang => lang.code === locale)?.name || 'English'} - - - - {languages.map((language) => ( - - - {language.flag} - {language.name} - - - ))} - - - ); + return ( + + ); } - diff --git a/surfsense_web/components/chat/ChatInputGroup.tsx b/surfsense_web/components/chat/ChatInputGroup.tsx index dfce2222..09c759e6 100644 --- a/surfsense_web/components/chat/ChatInputGroup.tsx +++ b/surfsense_web/components/chat/ChatInputGroup.tsx @@ -4,7 +4,6 @@ import { ChatInput } from "@llamaindex/chat-ui"; import { Brain, Check, FolderOpen, Zap } from "lucide-react"; import { useParams } from "next/navigation"; import React, { Suspense, useCallback, useState } from "react"; -import type { ResearchMode } from "@/components/chat"; import { ConnectorButton as ConnectorButtonComponent } from "@/components/chat/ConnectorComponents"; import { DocumentsDataTable } from "@/components/chat/DocumentsDataTable"; import { Badge } from "@/components/ui/badge"; @@ -243,74 +242,6 @@ const SearchModeSelector = React.memo( SearchModeSelector.displayName = "SearchModeSelector"; -const ResearchModeSelector = React.memo( - ({ - researchMode, - onResearchModeChange, - }: { - researchMode?: ResearchMode; - onResearchModeChange?: (mode: ResearchMode) => void; - }) => { - const handleValueChange = React.useCallback( - (value: string) => { - onResearchModeChange?.(value as ResearchMode); - }, - [onResearchModeChange] - ); - - // Memoize mode options to prevent recreation - const modeOptions = React.useMemo( - () => [ - { value: "QNA", label: "Q&A", shortLabel: "Q&A" }, - { - value: "REPORT_GENERAL", - label: "General Report", - shortLabel: "General", - }, - { - value: "REPORT_DEEP", - label: "Deep Report", - shortLabel: "Deep", - }, - { - value: "REPORT_DEEPER", - label: "Deeper Report", - shortLabel: "Deeper", - }, - ], - [] - ); - - return ( -
- Mode: - -
- ); - } -); - -ResearchModeSelector.displayName = "ResearchModeSelector"; - const LLMSelector = React.memo(() => { const { search_space_id } = useParams(); const searchSpaceId = Number(search_space_id); @@ -473,8 +404,6 @@ const CustomChatInputOptions = React.memo( selectedConnectors, searchMode, onSearchModeChange, - researchMode, - onResearchModeChange, }: { onDocumentSelectionChange?: (documents: Document[]) => void; selectedDocuments?: Document[]; @@ -482,8 +411,6 @@ const CustomChatInputOptions = React.memo( selectedConnectors?: string[]; searchMode?: "DOCUMENTS" | "CHUNKS"; onSearchModeChange?: (mode: "DOCUMENTS" | "CHUNKS") => void; - researchMode?: ResearchMode; - onResearchModeChange?: (mode: ResearchMode) => void; }) => { // Memoize the loading fallback to prevent recreation const loadingFallback = React.useMemo( @@ -506,10 +433,6 @@ const CustomChatInputOptions = React.memo( /> -
); @@ -526,8 +449,6 @@ export const ChatInputUI = React.memo( selectedConnectors, searchMode, onSearchModeChange, - researchMode, - onResearchModeChange, }: { onDocumentSelectionChange?: (documents: Document[]) => void; selectedDocuments?: Document[]; @@ -535,8 +456,6 @@ export const ChatInputUI = React.memo( selectedConnectors?: string[]; searchMode?: "DOCUMENTS" | "CHUNKS"; onSearchModeChange?: (mode: "DOCUMENTS" | "CHUNKS") => void; - researchMode?: ResearchMode; - onResearchModeChange?: (mode: ResearchMode) => void; }) => { return ( @@ -551,8 +470,6 @@ export const ChatInputUI = React.memo( selectedConnectors={selectedConnectors} searchMode={searchMode} onSearchModeChange={onSearchModeChange} - researchMode={researchMode} - onResearchModeChange={onResearchModeChange} /> ); diff --git a/surfsense_web/components/chat/ChatInterface.tsx b/surfsense_web/components/chat/ChatInterface.tsx index 67b5b687..acd22ad9 100644 --- a/surfsense_web/components/chat/ChatInterface.tsx +++ b/surfsense_web/components/chat/ChatInterface.tsx @@ -1,7 +1,6 @@ "use client"; import { type ChatHandler, ChatSection as LlamaIndexChatSection } from "@llamaindex/chat-ui"; -import type { ResearchMode } from "@/components/chat"; import { ChatInputUI } from "@/components/chat/ChatInputGroup"; import { ChatMessagesUI } from "@/components/chat/ChatMessages"; import type { Document } from "@/hooks/use-documents"; @@ -14,8 +13,6 @@ interface ChatInterfaceProps { selectedConnectors?: string[]; searchMode?: "DOCUMENTS" | "CHUNKS"; onSearchModeChange?: (mode: "DOCUMENTS" | "CHUNKS") => void; - researchMode?: ResearchMode; - onResearchModeChange?: (mode: ResearchMode) => void; } export default function ChatInterface({ @@ -26,8 +23,6 @@ export default function ChatInterface({ selectedConnectors = [], searchMode, onSearchModeChange, - researchMode, - onResearchModeChange, }: ChatInterfaceProps) { return ( @@ -41,8 +36,6 @@ export default function ChatInterface({ selectedConnectors={selectedConnectors} searchMode={searchMode} onSearchModeChange={onSearchModeChange} - researchMode={researchMode} - onResearchModeChange={onResearchModeChange} />
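With report modes removed, the chat layers above no longer thread `researchMode`/`onResearchModeChange` through every component. What survives of the props surface, reconstructed from this diff (the `handler` and connector-selection entries are elided in the hunks and therefore assumptions):

```ts
import type { ChatHandler } from "@llamaindex/chat-ui";
import type { Document } from "@/hooks/use-documents";

// researchMode and onResearchModeChange are gone; only search-mode plumbing remains.
interface ChatInterfaceProps {
	handler: ChatHandler; // assumed: not shown in the visible hunk context
	onDocumentSelectionChange?: (documents: Document[]) => void;
	selectedDocuments?: Document[];
	onConnectorSelectionChange?: (connectorTypes: string[]) => void; // assumed name
	selectedConnectors?: string[];
	searchMode?: "DOCUMENTS" | "CHUNKS";
	onSearchModeChange?: (mode: "DOCUMENTS" | "CHUNKS") => void;
}
```

Narrowing `ResearchMode` to `"QNA"` in `types.ts` below then lets the compiler flag any caller still passing a report mode.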
diff --git a/surfsense_web/components/chat/ConnectorComponents.tsx b/surfsense_web/components/chat/ConnectorComponents.tsx index 19058b4b..3866d055 100644 --- a/surfsense_web/components/chat/ConnectorComponents.tsx +++ b/surfsense_web/components/chat/ConnectorComponents.tsx @@ -1,35 +1,8 @@ -import { ChevronDown, FileText, MessageCircle, Plus } from "lucide-react"; +import { ChevronDown, Plus } from "lucide-react"; import type React from "react"; import { Button } from "@/components/ui/button"; import { getConnectorIcon } from "@/contracts/enums/connectorIcons"; -import type { Connector, ResearchMode } from "./types"; - -export const researcherOptions: { - value: ResearchMode; - label: string; - icon: React.ReactNode; -}[] = [ - { - value: "QNA", - label: "Q/A", - icon: getConnectorIcon("GENERAL"), - }, - { - value: "REPORT_GENERAL", - label: "General", - icon: getConnectorIcon("GENERAL"), - }, - { - value: "REPORT_DEEP", - label: "Deep", - icon: getConnectorIcon("DEEP"), - }, - { - value: "REPORT_DEEPER", - label: "Deeper", - icon: getConnectorIcon("DEEPER"), - }, -]; +import type { Connector } from "./types"; /** * Displays a small icon for a connector type @@ -134,93 +107,3 @@ export const ConnectorButton = ({ ); }; - -// New component for Research Mode Control with Q/A and Report toggle -type ResearchModeControlProps = { - value: ResearchMode; - onChange: (value: ResearchMode) => void; -}; - -export const ResearchModeControl = ({ value, onChange }: ResearchModeControlProps) => { - // Determine if we're in Q/A mode or Report mode - const isQnaMode = value === "QNA"; - const isReportMode = value.startsWith("REPORT_"); - - // Get the current report sub-mode - const getCurrentReportMode = () => { - if (!isReportMode) return "GENERAL"; - return value.replace("REPORT_", "") as "GENERAL" | "DEEP" | "DEEPER"; - }; - - const reportSubOptions = [ - { value: "GENERAL", label: "General", icon: getConnectorIcon("GENERAL") }, - { value: "DEEP", label: "Deep", icon: getConnectorIcon("DEEP") }, - { value: "DEEPER", label: "Deeper", icon: getConnectorIcon("DEEPER") }, - ]; - - const handleModeToggle = (mode: "QNA" | "REPORT") => { - if (mode === "QNA") { - onChange("QNA"); - } else { - // Default to GENERAL for Report mode - onChange("REPORT_GENERAL"); - } - }; - - const handleReportSubModeChange = (subMode: string) => { - onChange(`REPORT_${subMode}` as ResearchMode); - }; - - return ( -
- {/* Main Q/A vs Report Toggle */} -
- - -
- - {/* Report Sub-options (only show when in Report mode) */} - {isReportMode && ( -
- {reportSubOptions.map((option) => ( - - ))} -
- )} -
- ); -}; diff --git a/surfsense_web/components/chat/types.ts b/surfsense_web/components/chat/types.ts index 1544dd0c..8e4e1e29 100644 --- a/surfsense_web/components/chat/types.ts +++ b/surfsense_web/components/chat/types.ts @@ -47,4 +47,4 @@ export interface ToolInvocationUIPart { toolInvocation: ToolInvocation; } -export type ResearchMode = "QNA" | "REPORT_GENERAL" | "REPORT_DEEP" | "REPORT_DEEPER"; +export type ResearchMode = "QNA"; diff --git a/surfsense_web/components/dashboard-breadcrumb.tsx b/surfsense_web/components/dashboard-breadcrumb.tsx index 93bb770f..db8d5981 100644 --- a/surfsense_web/components/dashboard-breadcrumb.tsx +++ b/surfsense_web/components/dashboard-breadcrumb.tsx @@ -1,8 +1,8 @@ "use client"; import { usePathname } from "next/navigation"; -import React from "react"; import { useTranslations } from "next-intl"; +import React from "react"; import { Breadcrumb, BreadcrumbItem, @@ -18,7 +18,7 @@ interface BreadcrumbItemInterface { } export function DashboardBreadcrumb() { - const t = useTranslations('breadcrumb'); + const t = useTranslations("breadcrumb"); const pathname = usePathname(); // Parse the pathname to create breadcrumb items @@ -27,11 +27,14 @@ export function DashboardBreadcrumb() { const breadcrumbs: BreadcrumbItemInterface[] = []; // Always start with Dashboard - breadcrumbs.push({ label: t('dashboard'), href: "/dashboard" }); + breadcrumbs.push({ label: t("dashboard"), href: "/dashboard" }); // Handle search space if (segments[0] === "dashboard" && segments[1]) { - breadcrumbs.push({ label: `${t('search_space')} ${segments[1]}`, href: `/dashboard/${segments[1]}` }); + breadcrumbs.push({ + label: `${t("search_space")} ${segments[1]}`, + href: `/dashboard/${segments[1]}`, + }); // Handle specific sections if (segments[2]) { @@ -40,13 +43,13 @@ export function DashboardBreadcrumb() { // Map section names to more readable labels const sectionLabels: Record = { - researcher: t('researcher'), - documents: t('documents'), - connectors: t('connectors'), - podcasts: t('podcasts'), - logs: t('logs'), - chats: t('chats'), - settings: t('settings'), + researcher: t("researcher"), + documents: t("documents"), + connectors: t("connectors"), + podcasts: t("podcasts"), + logs: t("logs"), + chats: t("chats"), + settings: t("settings"), }; sectionLabel = sectionLabels[section] || sectionLabel; @@ -59,14 +62,14 @@ export function DashboardBreadcrumb() { // Handle documents sub-sections if (section === "documents") { const documentLabels: Record = { - upload: t('upload_documents'), - youtube: t('add_youtube'), - webpage: t('add_webpages'), + upload: t("upload_documents"), + youtube: t("add_youtube"), + webpage: t("add_webpages"), }; const documentLabel = documentLabels[subSection] || subSectionLabel; breadcrumbs.push({ - label: t('documents'), + label: t("documents"), href: `/dashboard/${segments[1]}/documents`, }); breadcrumbs.push({ label: documentLabel }); @@ -108,13 +111,13 @@ export function DashboardBreadcrumb() { } const connectorLabels: Record = { - add: t('add_connector'), - manage: t('manage_connectors'), + add: t("add_connector"), + manage: t("manage_connectors"), }; const connectorLabel = connectorLabels[subSection] || subSectionLabel; breadcrumbs.push({ - label: t('connectors'), + label: t("connectors"), href: `/dashboard/${segments[1]}/connectors`, }); breadcrumbs.push({ label: connectorLabel }); @@ -123,12 +126,12 @@ export function DashboardBreadcrumb() { // Handle other sub-sections const subSectionLabels: Record = { - upload: t('upload_documents'), - 
youtube: t('add_youtube'), - webpage: t('add_webpages'), - add: t('add_connector'), - edit: t('edit_connector'), - manage: t('manage'), + upload: t("upload_documents"), + youtube: t("add_youtube"), + webpage: t("add_webpages"), + add: t("add_connector"), + edit: t("edit_connector"), + manage: t("manage"), }; subSectionLabel = subSectionLabels[subSection] || subSectionLabel; diff --git a/surfsense_web/components/homepage/footer-new.tsx b/surfsense_web/components/homepage/footer-new.tsx index fb4c5753..e0f8eb6b 100644 --- a/surfsense_web/components/homepage/footer-new.tsx +++ b/surfsense_web/components/homepage/footer-new.tsx @@ -1,188 +1,180 @@ +import { + IconBrandDiscord, + IconBrandGithub, + IconBrandLinkedin, + IconBrandTwitter, +} from "@tabler/icons-react"; import Image from "next/image"; import Link from "next/link"; import React from "react"; -import { - IconBrandTwitter, - IconBrandLinkedin, - IconBrandGithub, - IconBrandDiscord, -} from "@tabler/icons-react"; import { Logo } from "@/components/Logo"; export function FooterNew() { - const pages = [ - // { - // title: "All Products", - // href: "#", - // }, - // { - // title: "Studio", - // href: "#", - // }, - // { - // title: "Clients", - // href: "#", - // }, - { - title: "Pricing", - href: "/pricing", - }, - { - title: "Docs", - href: "/docs", - }, - // { - // title: "Blog", - // href: "#", - // }, - ]; + const pages = [ + // { + // title: "All Products", + // href: "#", + // }, + // { + // title: "Studio", + // href: "#", + // }, + // { + // title: "Clients", + // href: "#", + // }, + { + title: "Pricing", + href: "/pricing", + }, + { + title: "Docs", + href: "/docs", + }, + // { + // title: "Blog", + // href: "#", + // }, + ]; - const socials = [ - { - title: "Twitter", - href: "https://x.com/mod_setter", - icon: IconBrandTwitter, - }, - { - title: "LinkedIn", - href: "https://www.linkedin.com/in/rohan-verma-sde/", - icon: IconBrandLinkedin, - }, - { - title: "GitHub", - href: "https://github.com/MODSetter", - icon: IconBrandGithub, - }, - { - title: "Discord", - href: "https://discord.gg/ejRNvftDp9", - icon: IconBrandDiscord, - }, - ]; - const legals = [ - { - title: "Privacy Policy", - href: "/privacy", - }, - { - title: "Terms of Service", - href: "/terms", - }, - // { - // title: "Cookie Policy", - // href: "#", - // }, - ]; + const socials = [ + { + title: "Twitter", + href: "https://x.com/mod_setter", + icon: IconBrandTwitter, + }, + { + title: "LinkedIn", + href: "https://www.linkedin.com/in/rohan-verma-sde/", + icon: IconBrandLinkedin, + }, + { + title: "GitHub", + href: "https://github.com/MODSetter", + icon: IconBrandGithub, + }, + { + title: "Discord", + href: "https://discord.gg/ejRNvftDp9", + icon: IconBrandDiscord, + }, + ]; + const legals = [ + { + title: "Privacy Policy", + href: "/privacy", + }, + { + title: "Terms of Service", + href: "/terms", + }, + // { + // title: "Cookie Policy", + // href: "#", + // }, + ]; - const signups = [ - { - title: "Sign In", - href: "/login", - }, - // { - // title: "Login", - // href: "#", - // }, - // { - // title: "Forgot Password", - // href: "#", - // }, - ]; - return ( -
-
-
-
- - SurfSense -
+ const signups = [ + { + title: "Sign In", + href: "/login", + }, + // { + // title: "Login", + // href: "#", + // }, + // { + // title: "Forgot Password", + // href: "#", + // }, + ]; + return ( +
+
+
+
+ + SurfSense +
-
- © SurfSense 2025. All rights reserved. -
-
-
-
-

- Pages -

-
    - {pages.map((page, idx) => ( -
  • - - {page.title} - -
  • - ))} -
-
+
© SurfSense 2025. All rights reserved.
+
+
+
+

+ Pages +

+
    + {pages.map((page, idx) => ( +
  • + + {page.title} + +
  • + ))} +
+
-
-

- Socials -

-
    - {socials.map((social, idx) => { - const Icon = social.icon; - return ( -
  • - - - {social.title} - -
  • - ); - })} -
-
+
+

+ Socials +

+
    + {socials.map((social, idx) => { + const Icon = social.icon; + return ( +
  • + + + {social.title} + +
  • + ); + })} +
+
-
-

- Legal -

-
    - {legals.map((legal, idx) => ( -
  • - - {legal.title} - -
  • - ))} -
-
-
-

- Register -

-
    - {signups.map((auth, idx) => ( -
  • - - {auth.title} - -
  • - ))} -
-
-
-
-

- SurfSense -

-
- ); +
+

+ Legal +

+
    + {legals.map((legal, idx) => ( +
  • + + {legal.title} + +
  • + ))} +
+
+
+

+ Register +

+
    + {signups.map((auth, idx) => ( +
  • + + {auth.title} + +
  • + ))} +
+
+
+
+

+ SurfSense +

+
+ ); } diff --git a/surfsense_web/components/onboard/add-provider-step.tsx b/surfsense_web/components/onboard/add-provider-step.tsx index 9d48a56f..d7ed3aab 100644 --- a/surfsense_web/components/onboard/add-provider-step.tsx +++ b/surfsense_web/components/onboard/add-provider-step.tsx @@ -2,9 +2,9 @@ import { AlertCircle, Bot, Plus, Trash2 } from "lucide-react"; import { motion } from "motion/react"; +import { useTranslations } from "next-intl"; import { useState } from "react"; import { toast } from "sonner"; -import { useTranslations } from "next-intl"; import { Alert, AlertDescription } from "@/components/ui/alert"; import { Badge } from "@/components/ui/badge"; import { Button } from "@/components/ui/button"; @@ -35,7 +35,7 @@ export function AddProviderStep({ onConfigCreated, onConfigDeleted, }: AddProviderStepProps) { - const t = useTranslations('onboard'); + const t = useTranslations("onboard"); const { llmConfigs, createLLMConfig, deleteLLMConfig } = useLLMConfigs(searchSpaceId); const [isAddingNew, setIsAddingNew] = useState(false); const [formData, setFormData] = useState({ @@ -95,15 +95,13 @@ export function AddProviderStep({ {/* Info Alert */} - - {t('add_provider_instruction')} - + {t("add_provider_instruction")} {/* Existing Configurations */} {llmConfigs.length > 0 && (
-

{t('your_llm_configs')}

+

{t("your_llm_configs")}

{llmConfigs.map((config) => ( {config.provider}

- {t('model')}: {config.model_name} - {config.language && ` • ${t('language')}: ${config.language}`} - {config.api_base && ` • ${t('base')}: ${config.api_base}`} + {t("model")}: {config.model_name} + {config.language && ` • ${t("language")}: ${config.language}`} + {config.api_base && ` • ${t("base")}: ${config.api_base}`}

) : ( - {t('add_new_llm_provider')} - - {t('configure_new_provider')} - + {t("add_new_llm_provider")} + {t("configure_new_provider")}
- + handleInputChange("name", e.target.value)} required @@ -187,13 +181,13 @@ export function AddProviderStep({
- + handleInputChange("language", value)} > - + {LANGUAGES.map((language) => ( @@ -228,10 +222,10 @@ export function AddProviderStep({ {formData.provider === "CUSTOM" && (
- + handleInputChange("custom_provider", e.target.value)} required @@ -240,27 +234,27 @@ export function AddProviderStep({ )}
- + handleInputChange("model_name", e.target.value)} required /> {selectedProvider && (

- {t('examples')}: {selectedProvider.example} + {t("examples")}: {selectedProvider.example}

)}
- + handleInputChange("api_key", e.target.value)} required @@ -268,10 +262,10 @@ export function AddProviderStep({
- + handleInputChange("api_base", e.target.value)} /> @@ -287,7 +281,7 @@ export function AddProviderStep({
diff --git a/surfsense_web/components/onboard/assign-roles-step.tsx b/surfsense_web/components/onboard/assign-roles-step.tsx index bcb18c14..1a1557a3 100644 --- a/surfsense_web/components/onboard/assign-roles-step.tsx +++ b/surfsense_web/components/onboard/assign-roles-step.tsx @@ -2,8 +2,8 @@ import { AlertCircle, Bot, Brain, CheckCircle, Zap } from "lucide-react"; import { motion } from "motion/react"; -import { useEffect, useState } from "react"; import { useTranslations } from "next-intl"; +import { useEffect, useState } from "react"; import { Alert, AlertDescription } from "@/components/ui/alert"; import { Badge } from "@/components/ui/badge"; import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"; @@ -23,31 +23,31 @@ interface AssignRolesStepProps { } export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignRolesStepProps) { - const t = useTranslations('onboard'); + const t = useTranslations("onboard"); const { llmConfigs } = useLLMConfigs(searchSpaceId); const { preferences, updatePreferences } = useLLMPreferences(searchSpaceId); const ROLE_DESCRIPTIONS = { long_context: { icon: Brain, - title: t('long_context_llm_title'), - description: t('long_context_llm_desc'), + title: t("long_context_llm_title"), + description: t("long_context_llm_desc"), color: "bg-blue-100 text-blue-800 border-blue-200", - examples: t('long_context_llm_examples'), + examples: t("long_context_llm_examples"), }, fast: { icon: Zap, - title: t('fast_llm_title'), - description: t('fast_llm_desc'), + title: t("fast_llm_title"), + description: t("fast_llm_desc"), color: "bg-green-100 text-green-800 border-green-200", - examples: t('fast_llm_examples'), + examples: t("fast_llm_examples"), }, strategic: { icon: Bot, - title: t('strategic_llm_title'), - description: t('strategic_llm_desc'), + title: t("strategic_llm_title"), + description: t("strategic_llm_desc"), color: "bg-purple-100 text-purple-800 border-purple-200", - examples: t('strategic_llm_examples'), + examples: t("strategic_llm_examples"), }, }; @@ -111,10 +111,8 @@ export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignR return (
-

{t('no_llm_configs_found')}

-

- {t('add_provider_before_roles')} -

+

{t("no_llm_configs_found")}

+

{t("add_provider_before_roles")}

); } @@ -124,9 +122,7 @@ export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignR {/* Info Alert */} - - {t('assign_roles_instruction')} - + {t("assign_roles_instruction")} {/* Role Assignment Cards */} @@ -162,17 +158,17 @@ export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignR
- {t('use_cases')}: {role.examples} + {t("use_cases")}: {role.examples}
- +