@@ -89,6 +89,84 @@ EMBEDDINGS_DEVICE=cpu
8989# '{"device":"cuda","model_kwargs":{"torch_dtype":"float16"}}' - GPU with float16
9090EMBEDDINGS_MODEL_KWARGS =
9191
92+
93+ # =============================================================================
94+ # Web Search Service Configuration (for DeepResearch and PeopleHub)
95+ # =============================================================================
96+ # Serper API key for web search (get from https://serper.dev/)
97+ SERPER_API_KEY =
98+
99+ # Rate limit for web search requests (format: "N/period", e.g., "360/hour")
100+ # Default: 360/hour (matches Serper free tier)
101+ WEB_SEARCH_RATE_LIMIT = 360/hour
102+
103+ # Directory for web search analytics data (request counts, timing)
104+ # Default: ./data/web_search_analytics (created automatically)
105+ WEB_SEARCH_ANALYTICS_DIR =
106+
107+ # =============================================================================
108+ # Reranking Configuration (for search result reranking)
109+ # =============================================================================
110+ # Use local reranking model (true) or remote API (false)
111+ # Local: Uses sentence-transformers CrossEncoder (requires GPU/CPU resources)
112+ # Remote: Uses API-based reranking (e.g., Cohere, Jina)
113+ RERANKING_USE_LOCAL = true
114+
115+ # Local reranking model identifier (HuggingFace model ID)
116+ # Default: BAAI/bge-reranker-base
117+ # Alternatives: BAAI/bge-reranker-large (better quality), cross-encoder/ms-marco-MiniLM-L-6-v2 (faster)
118+ RERANKING_MODEL = BAAI/bge-reranker-base
119+
120+ # Device for local reranking: "cpu", "cuda", "cuda:0", etc.
121+ RERANKING_DEVICE = cpu
122+
123+ # Remote reranking API URL (only needed if RERANKING_USE_LOCAL=false)
124+ # Examples:
125+ # Cohere: https://api.cohere.ai/v1/rerank
126+ # Jina: https://api.jina.ai/v1/rerank
127+ RERANKING_API_URL =
128+
129+ # Remote reranking API key (only needed if RERANKING_USE_LOCAL=false)
130+ RERANKING_API_KEY =
131+
132+
133+ # =============================================================================
134+ # LangAlpha Quantitative Analysis Configuration
135+ # =============================================================================
136+ # Polygon.io API key for market data (get from https://polygon.io/)
137+ # Required for market data retrieval (OHLCV, ticker snapshots)
138+ POLYGON_API_KEY =
139+
140+ # Alpha Vantage API key for fundamental data (get from https://www.alphavantage.co/support/#api-key)
141+ # Required for fundamental data retrieval (company overview, financial statements)
142+ ALPHA_VANTAGE_API_KEY =
143+
144+ # LangAlpha LLM Model Configuration
145+ # Model for reasoning tasks (supervisor, planner, analyst)
146+ # Default: gpt-4o (OpenAI), can be overridden per provider
147+ LANGALPHA_REASONING_MODEL = gpt-4o
148+
149+ # Model for basic tasks (researcher, reporter, market agent)
150+ # Default: gpt-4o-mini (OpenAI), can be overridden per provider
151+ LANGALPHA_BASIC_MODEL = gpt-4o-mini
152+
153+ # Model for economic analysis tasks
154+ # Default: gpt-4o-mini (OpenAI), can be overridden per provider
155+ LANGALPHA_ECONOMIC_MODEL = gpt-4o-mini
156+
157+ # Model for coding/calculation tasks (coder agent)
158+ # Default: gpt-4o (OpenAI), can be overridden per provider
159+ LANGALPHA_CODING_MODEL = gpt-4o
160+
161+ # Budget level for LangAlpha agents
162+ # Options: "low" (uses economic models), "medium" (uses basic models), "high" (uses reasoning/coding models)
163+ # Default: medium
164+ # Low budget: All agents use economic model (cheapest)
165+ # Medium budget: All agents use basic model (balanced)
166+ # High budget: Agents use specialized models (most expensive, best quality)
167+ LANGALPHA_BUDGET_LEVEL = medium
168+
169+
92170# =============================================================================
93171# Sentinel Hub Configuration (for satellite imagery)
94172# =============================================================================
0 commit comments