From 8c202f63399da1b5e4a57fff14ef6642093d07c3 Mon Sep 17 00:00:00 2001
From: Cursor Agent
Date: Fri, 8 Aug 2025 08:17:34 +0000
Subject: [PATCH 1/2] Add Universal AI Agent artifacts: clients, OpenAPI spec, and utilities

Co-authored-by: escapethematrixmate01
---
 universal-ai-artifacts/README.md              |  47 +
 .../clients/node/package.json                 |  14 +
 .../clients/node/src/client.js                | 117 +++
 .../clients/node/src/examples.js              |  31 +
 .../clients/node/src/ws_example.js            |  36 +
 .../clients/python/examples.py                |  25 +
 .../clients/python/requirements.txt           |   1 +
 .../clients/python/universal_ai_client.py     |  97 ++
 universal-ai-artifacts/openapi/openapi.yaml   | 928 ++++++++++++++++++
 .../UniversalAI.postman_collection.json       | 205 ++++
 universal-ai-artifacts/scripts/curl.sh        |  62 ++
 universal-ai-artifacts/utils/rate_limit.js    |  44 +
 universal-ai-artifacts/utils/rate_limit.py    |  36 +
 13 files changed, 1643 insertions(+)
 create mode 100644 universal-ai-artifacts/README.md
 create mode 100644 universal-ai-artifacts/clients/node/package.json
 create mode 100644 universal-ai-artifacts/clients/node/src/client.js
 create mode 100644 universal-ai-artifacts/clients/node/src/examples.js
 create mode 100644 universal-ai-artifacts/clients/node/src/ws_example.js
 create mode 100644 universal-ai-artifacts/clients/python/examples.py
 create mode 100644 universal-ai-artifacts/clients/python/requirements.txt
 create mode 100644 universal-ai-artifacts/clients/python/universal_ai_client.py
 create mode 100644 universal-ai-artifacts/openapi/openapi.yaml
 create mode 100644 universal-ai-artifacts/postman/UniversalAI.postman_collection.json
 create mode 100644 universal-ai-artifacts/scripts/curl.sh
 create mode 100644 universal-ai-artifacts/utils/rate_limit.js
 create mode 100644 universal-ai-artifacts/utils/rate_limit.py

diff --git a/universal-ai-artifacts/README.md b/universal-ai-artifacts/README.md
new file mode 100644
index 00000000..5be782d8
--- /dev/null
+++ b/universal-ai-artifacts/README.md
@@ -0,0 +1,47 @@
+# Universal AI Agent – Artifact Bundle
+
+Contents:
+- `openapi/openapi.yaml`: OpenAPI 3.0 spec
+- `postman/UniversalAI.postman_collection.json`: Postman collection (variables: `baseUrl`, `token`)
+- `clients/node`: Node client (ESM), SSE streaming, WebSocket sample, rate-limited fetch
+- `clients/python`: Python client (`requests`), SSE streaming
+- `utils`: Rate-limit-safe helpers (`rate_limit.js`, `rate_limit.py`)
+- `scripts/curl.sh`: Handy cURL commands
+
+Quickstart:
+
+1) Postman
+- Import `postman/UniversalAI.postman_collection.json`
+- Set variables `baseUrl`, `token`
+
+2) OpenAPI
+- Load `openapi/openapi.yaml` into Swagger UI/Insomnia/Stoplight
+
+3) Node client
+```bash
+cd clients/node
+npm install
+BASE_URL=https://your-domain.com TOKEN=YOUR_TOKEN npm start
+# WebSocket sample
+BASE_URL=https://your-domain.com TOKEN=YOUR_TOKEN npm run ws
+```
+
+4) Python client
+```bash
+cd clients/python
+python -m venv .venv && source .venv/bin/activate
+pip install -r requirements.txt
+BASE_URL=https://your-domain.com TOKEN=YOUR_TOKEN python examples.py
+```
+
+5) cURL
+```bash
+cd scripts
+chmod +x curl.sh
+BASE_URL=https://your-domain.com TOKEN=YOUR_TOKEN ./curl.sh chat "Hello"
+```
+
+Notes:
+- Set `BASE_URL` and `TOKEN` environment variables for all samples.
+- Streaming responses are SSE (Server-Sent Events) and parsed via `data: ...` lines.
+- Retry/backoff on HTTP 429/5xx is implemented in the rate-limit helpers.
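To make the last two notes concrete, here is a minimal, self-contained sketch (not part of the bundle) that calls `/chat` directly through the bundled `rateLimitedFetch` helper. It assumes Node 18+ (global `fetch`), an ES module context, and that it is run from the bundle root so the relative import resolves; the endpoint and payload shape are taken from the OpenAPI spec in this bundle.

```js
// chat_retry_sketch.js — illustrative only; adjust BASE_URL/TOKEN for your deployment.
import { rateLimitedFetch } from './utils/rate_limit.js';

const BASE_URL = process.env.BASE_URL || 'https://your-domain.com';
const TOKEN = process.env.TOKEN || 'YOUR_TOKEN';

// rateLimitedFetch retries on HTTP 429/5xx with exponential backoff plus jitter
// and honors a Retry-After header when the server sends one.
const res = await rateLimitedFetch(fetch, `${BASE_URL}/chat`, {
  method: 'POST',
  headers: {
    'Authorization': `Bearer ${TOKEN}`,
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({ message: 'Hello', optimizePrompt: true })
}, { maxRetries: 4, baseDelayMs: 500 });

console.log(await res.json());
```

The bundled Node and Python clients wrap exactly this pattern, so in most cases you can use them instead of calling the helper directly.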
\ No newline at end of file
diff --git a/universal-ai-artifacts/clients/node/package.json b/universal-ai-artifacts/clients/node/package.json
new file mode 100644
index 00000000..b39c7dcc
--- /dev/null
+++ b/universal-ai-artifacts/clients/node/package.json
@@ -0,0 +1,14 @@
+{
+  "name": "universal-ai-client",
+  "version": "0.1.0",
+  "private": true,
+  "type": "module",
+  "engines": { "node": ">=18" },
+  "dependencies": {
+    "ws": "^8.17.0"
+  },
+  "scripts": {
+    "start": "node src/examples.js",
+    "ws": "node src/ws_example.js"
+  }
+}
\ No newline at end of file
diff --git a/universal-ai-artifacts/clients/node/src/client.js b/universal-ai-artifacts/clients/node/src/client.js
new file mode 100644
index 00000000..053869e1
--- /dev/null
+++ b/universal-ai-artifacts/clients/node/src/client.js
@@ -0,0 +1,117 @@
+// The shared helper lives at universal-ai-artifacts/utils/, three levels up from src/.
+import { rateLimitedFetch } from '../../../utils/rate_limit.js';
+
+export class UniversalAIClient {
+  constructor(baseUrl, token) {
+    this.baseUrl = (baseUrl || '').replace(/\/$/, '');
+    this.token = token;
+  }
+
+  _headers(json = true) {
+    const headers = { 'Authorization': `Bearer ${this.token}` };
+    if (json) headers['Content-Type'] = 'application/json';
+    return headers;
+  }
+
+  async chat(message, options = {}) {
+    const response = await rateLimitedFetch(fetch, `${this.baseUrl}/chat`, {
+      method: 'POST',
+      headers: this._headers(true),
+      body: JSON.stringify({ message, ...options })
+    });
+    return response.json();
+  }
+
+  async streamChat(message, onChunk, { optimizePrompt = true } = {}) {
+    const url = `${this.baseUrl}/stream?message=${encodeURIComponent(message)}&optimizePrompt=${optimizePrompt}`;
+    const response = await rateLimitedFetch(fetch, url, {
+      method: 'GET',
+      headers: { ...this._headers(false), 'Accept': 'text/event-stream' }
+    });
+
+    const reader = response.body.getReader();
+    const decoder = new TextDecoder();
+
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      const chunk = decoder.decode(value);
+      for (const line of chunk.split('\n')) {
+        if (line.startsWith('data: ')) {
+          try {
+            const data = JSON.parse(line.slice(6));
+            if (onChunk) onChunk(data);
+          } catch (_) { /* ignore parse errors */ }
+        }
+      }
+    }
+  }
+
+  async getConversations({ limit = 50, offset = 0, userId } = {}) {
+    const url = new URL(`${this.baseUrl}/conversations`);
+    url.searchParams.set('limit', String(limit));
+    url.searchParams.set('offset', String(offset));
+    if (userId) url.searchParams.set('userId', userId);
+
+    const response = await rateLimitedFetch(fetch, url.toString(), {
+      method: 'GET',
+      headers: this._headers(false)
+    });
+    return response.json();
+  }
+
+  async ragIngest(documents, collection = 'knowledge_base') {
+    const response = await rateLimitedFetch(fetch, `${this.baseUrl}/rag/ingest`, {
+      method: 'POST',
+      headers: this._headers(true),
+      body: JSON.stringify({ documents, collection })
+    });
+    return response.json();
+  }
+
+  async ragSearch(query, { collection = 'knowledge_base', limit = 5, threshold } = {}) {
+    const payload = { query, collection, limit };
+    if (typeof threshold === 'number') payload.threshold = threshold;
+    const response = await rateLimitedFetch(fetch, `${this.baseUrl}/rag/search`, {
+      method: 'POST',
+      headers: this._headers(true),
+      body: JSON.stringify(payload)
+    });
+    return response.json();
+  }
+
+  async ragAnswer(question, { collection = 'knowledge_base', maxContext = 3, includeContext = true } = {}) {
+    const response = await rateLimitedFetch(fetch, `${this.baseUrl}/rag/answer`, {
+      method: 'POST',
+      headers:
this._headers(true), + body: JSON.stringify({ question, collection, maxContext, includeContext }) + }); + return response.json(); + } + + async listPlugins() { + const response = await rateLimitedFetch(fetch, `${this.baseUrl}/plugins`, { + method: 'GET', + headers: this._headers(false) + }); + return response.json(); + } + + async executePlugin(pluginName, action, parameters = {}) { + const response = await rateLimitedFetch(fetch, `${this.baseUrl}/plugins/${encodeURIComponent(pluginName)}/execute`, { + method: 'POST', + headers: this._headers(true), + body: JSON.stringify({ action, parameters }) + }); + return response.json(); + } + + async analyticsDashboard() { + const response = await rateLimitedFetch(fetch, `${this.baseUrl}/analytics/dashboard`, { + method: 'GET', + headers: this._headers(false) + }); + return response.json(); + } +} + +export default UniversalAIClient; \ No newline at end of file diff --git a/universal-ai-artifacts/clients/node/src/examples.js b/universal-ai-artifacts/clients/node/src/examples.js new file mode 100644 index 00000000..86d486e8 --- /dev/null +++ b/universal-ai-artifacts/clients/node/src/examples.js @@ -0,0 +1,31 @@ +import UniversalAIClient from './client.js'; + +const BASE_URL = process.env.BASE_URL || 'https://your-domain.com'; +const TOKEN = process.env.TOKEN || 'YOUR_TOKEN'; + +const client = new UniversalAIClient(BASE_URL, TOKEN); + +async function main() { + console.log('Chat:'); + const chat = await client.chat('Hello, how are you?', { optimizePrompt: true }); + console.log(chat); + + console.log('\nStreaming:'); + await client.streamChat('Tell me a short story.', (chunk) => { + if (chunk.type === 'chunk') process.stdout.write(chunk.content); + if (chunk.type === 'end') console.log('\n[stream end]', chunk.metadata); + }); + + console.log('\nRAG Search:'); + const search = await client.ragSearch('machine learning algorithms', { collection: 'knowledge_base', limit: 3 }); + console.log(search.results?.map(r => ({ id: r.id, score: r.score }))); + + console.log('\nPlugin Execute:'); + const plugin = await client.executePlugin('web-scraper', 'scrape', { url: 'https://example.com', selector: '.content', format: 'text' }); + console.log(plugin); +} + +main().catch((err) => { + console.error(err); + process.exit(1); +}); \ No newline at end of file diff --git a/universal-ai-artifacts/clients/node/src/ws_example.js b/universal-ai-artifacts/clients/node/src/ws_example.js new file mode 100644 index 00000000..640b54ec --- /dev/null +++ b/universal-ai-artifacts/clients/node/src/ws_example.js @@ -0,0 +1,36 @@ +import WebSocket from 'ws'; + +const BASE_URL = process.env.BASE_URL || 'https://your-domain.com'; +const TOKEN = process.env.TOKEN || 'YOUR_TOKEN'; +const WS_URL = process.env.WS_URL || BASE_URL.replace(/^http/, 'ws'); + +const ws = new WebSocket(WS_URL); + +ws.on('open', () => { + console.log('WS connected, authenticating...'); + ws.send(JSON.stringify({ type: 'auth', token: TOKEN })); +}); + +ws.on('message', (buf) => { + const msgText = buf.toString(); + try { + const data = JSON.parse(msgText); + if (data.type === 'auth_success') { + console.log('Auth OK, sending stream_chat'); + ws.send(JSON.stringify({ type: 'stream_chat', text: 'Tell me a story', optimizePrompt: true })); + } else if (data.type === 'stream_chunk') { + process.stdout.write(data.chunk); + } else if (data.type === 'chat_response') { + console.log('\n[chat_response]', data.message); + } else if (data.type === 'error') { + console.error('WS error:', data.message); + } else { + 
console.log('[WS]', data);
+    }
+  } catch (_) {
+    console.log('[RAW]', msgText);
+  }
+});
+
+ws.on('close', () => console.log('WS closed'));
+ws.on('error', (err) => console.error('WS error', err));
\ No newline at end of file
diff --git a/universal-ai-artifacts/clients/python/examples.py b/universal-ai-artifacts/clients/python/examples.py
new file mode 100644
index 00000000..240856f7
--- /dev/null
+++ b/universal-ai-artifacts/clients/python/examples.py
@@ -0,0 +1,25 @@
+from universal_ai_client import UniversalAIClient
+import os
+
+BASE_URL = os.environ.get('BASE_URL', 'https://your-domain.com')
+TOKEN = os.environ.get('TOKEN', 'YOUR_TOKEN')
+
+client = UniversalAIClient(BASE_URL, TOKEN)
+
+print('Chat:')
+print(client.chat('Hello, how are you?', optimizePrompt=True))
+
+print('\nStreaming:')
+for event in client.stream_chat('Tell me a short story.'):
+    if event.get('type') == 'chunk':
+        print(event.get('content', ''), end='', flush=True)
+    elif event.get('type') == 'end':
+        print('\n[stream end]', event.get('metadata'))
+        break
+
+print('\nRAG Search:')
+search = client.rag_search('machine learning algorithms', collection='knowledge_base', limit=3)
+print([{'id': r['id'], 'score': r['score']} for r in search.get('results', [])])
+
+print('\nPlugin Execute:')
+print(client.execute_plugin('web-scraper', 'scrape', { 'url': 'https://example.com', 'selector': '.content', 'format': 'text' }))
\ No newline at end of file
diff --git a/universal-ai-artifacts/clients/python/requirements.txt b/universal-ai-artifacts/clients/python/requirements.txt
new file mode 100644
index 00000000..37912b81
--- /dev/null
+++ b/universal-ai-artifacts/clients/python/requirements.txt
@@ -0,0 +1 @@
+requests>=2.31.0
\ No newline at end of file
diff --git a/universal-ai-artifacts/clients/python/universal_ai_client.py b/universal-ai-artifacts/clients/python/universal_ai_client.py
new file mode 100644
index 00000000..1dae7d3e
--- /dev/null
+++ b/universal-ai-artifacts/clients/python/universal_ai_client.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+import json, os, sys
+from typing import Any, Dict, Generator, Iterable, Optional
+
+import requests
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))  # make ../../utils importable
+from utils.rate_limit import request_with_retries
+
+
+class UniversalAIClient:
+    def __init__(self, base_url: str, token: str):
+        self.base_url = base_url.rstrip('/')
+        self.token = token
+        self.session = requests.Session()
+        self.session.headers.update({'Authorization': f'Bearer {token}'})
+
+    def _json(self, method: str, path: str, json_body: Optional[Dict[str, Any]] = None, stream: bool = False) -> requests.Response:
+        url = f"{self.base_url}{path}"
+        headers = {}
+        if not stream:
+            headers['Content-Type'] = 'application/json'
+        return request_with_retries(
+            self.session.request,
+            method=method,
+            url=url,
+            json=json_body,
+            headers=headers,
+            stream=stream,
+        )
+
+    def chat(self, message: str, **kwargs: Any) -> Dict[str, Any]:
+        resp = self._json('POST', '/chat', { 'message': message, **kwargs })
+        resp.raise_for_status()
+        return resp.json()
+
+    def stream_chat(self, message: str, optimize_prompt: bool = True) -> Generator[Dict[str, Any], None, None]:
+        params = {
+            'message': message,
+            'optimizePrompt': 'true' if optimize_prompt else 'false'
+        }
+        url = f"{self.base_url}/stream"
+        resp = request_with_retries(
+            self.session.get,
+            url=url,
+            headers={'Accept': 'text/event-stream'},
+            params=params,
+            stream=True,
+        )
+        resp.raise_for_status()
+        for line in resp.iter_lines(decode_unicode=True):
+            if not line:
+                continue
+            if line.startswith('data: '):
+                try:
yield json.loads(line[6:]) + except json.JSONDecodeError: + continue + + def get_conversations(self, limit: int = 50, offset: int = 0, user_id: Optional[str] = None) -> Dict[str, Any]: + params: Dict[str, Any] = { 'limit': limit, 'offset': offset } + if user_id: + params['userId'] = user_id + url = f"{self.base_url}/conversations" + resp = request_with_retries(self.session.get, url=url, params=params) + resp.raise_for_status() + return resp.json() + + def rag_ingest(self, documents: Iterable[Dict[str, Any]], collection: str = 'knowledge_base') -> Dict[str, Any]: + payload = { 'documents': list(documents), 'collection': collection } + resp = self._json('POST', '/rag/ingest', payload) + resp.raise_for_status() + return resp.json() + + def rag_search(self, query: str, collection: str = 'knowledge_base', limit: int = 5, threshold: Optional[float] = None) -> Dict[str, Any]: + payload: Dict[str, Any] = { 'query': query, 'collection': collection, 'limit': limit } + if threshold is not None: + payload['threshold'] = threshold + resp = self._json('POST', '/rag/search', payload) + resp.raise_for_status() + return resp.json() + + def rag_answer(self, question: str, collection: str = 'knowledge_base', max_context: int = 3, include_context: bool = True) -> Dict[str, Any]: + payload = { 'question': question, 'collection': collection, 'maxContext': max_context, 'includeContext': include_context } + resp = self._json('POST', '/rag/answer', payload) + resp.raise_for_status() + return resp.json() + + def list_plugins(self) -> Dict[str, Any]: + resp = request_with_retries(self.session.get, url=f"{self.base_url}/plugins") + resp.raise_for_status() + return resp.json() + + def execute_plugin(self, plugin_name: str, action: str, parameters: Dict[str, Any]) -> Dict[str, Any]: + payload = { 'action': action, 'parameters': parameters } + resp = self._json('POST', f"/plugins/{plugin_name}/execute", payload) + resp.raise_for_status() + return resp.json() \ No newline at end of file diff --git a/universal-ai-artifacts/openapi/openapi.yaml b/universal-ai-artifacts/openapi/openapi.yaml new file mode 100644 index 00000000..01623be2 --- /dev/null +++ b/universal-ai-artifacts/openapi/openapi.yaml @@ -0,0 +1,928 @@ +openapi: 3.0.3 +info: + title: Universal AI Agent API + version: "1.0.0" + description: | + OpenAPI 3.0 specification generated from the Universal AI Agent API reference. +servers: + - url: https://your-domain.com + description: Production server +security: + - bearerAuth: [] + +paths: + /health: + get: + summary: Health check + description: Returns service health and connected subsystems. 
+ responses: + '200': + description: Health status + content: + application/json: + schema: + $ref: '#/components/schemas/HealthResponse' + + /system/info: + get: + summary: System information + security: + - bearerAuth: [] + responses: + '200': + description: System information + content: + application/json: + schema: + $ref: '#/components/schemas/SystemInfoResponse' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /auth/login: + post: + summary: Login and obtain JWT tokens + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/LoginRequest' + responses: + '200': + description: Login success + content: + application/json: + schema: + $ref: '#/components/schemas/LoginResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /chat: + post: + summary: Standard chat + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ChatRequest' + responses: + '200': + description: Chat response + content: + application/json: + schema: + $ref: '#/components/schemas/ChatResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /stream: + get: + summary: Streaming chat (SSE) + description: Server-Sent Events stream of chat tokens/chunks. + security: + - bearerAuth: [] + parameters: + - in: query + name: message + required: true + schema: + type: string + - in: query + name: optimizePrompt + required: false + schema: + type: boolean + default: false + responses: + '200': + description: text/event-stream output + content: + text/event-stream: + schema: + type: string + description: Stream of SSE events with `data: {"type": "start|chunk|end", ...}` + '401': + $ref: '#/components/responses/UnauthorizedError' + + /conversations: + get: + summary: List conversation history + security: + - bearerAuth: [] + parameters: + - in: query + name: limit + schema: { type: integer, default: 50, minimum: 1 } + - in: query + name: offset + schema: { type: integer, default: 0, minimum: 0 } + - in: query + name: userId + schema: { type: string } + responses: + '200': + description: Conversation list + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationsResponse' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /rag/ingest: + post: + summary: Ingest documents to the RAG collection + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RagIngestRequest' + responses: + '200': + description: Ingest result + content: + application/json: + schema: + $ref: '#/components/schemas/RagIngestResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /rag/search: + post: + summary: Semantic search over RAG documents + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RagSearchRequest' + responses: + '200': + description: Search results + content: + application/json: + schema: + $ref: '#/components/schemas/RagSearchResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /rag/answer: + post: + summary: RAG-enhanced answer generation + security: + - bearerAuth: [] + requestBody: + required: true + content: + 
application/json: + schema: + $ref: '#/components/schemas/RagAnswerRequest' + responses: + '200': + description: Generated answer with sources and context + content: + application/json: + schema: + $ref: '#/components/schemas/RagAnswerResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /agents/execute: + post: + summary: Execute a multi-agent task + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/AgentsExecuteRequest' + responses: + '200': + description: Task execution results + content: + application/json: + schema: + $ref: '#/components/schemas/AgentsExecuteResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /agents/task/{taskId}: + get: + summary: Get multi-agent task status + security: + - bearerAuth: [] + parameters: + - in: path + name: taskId + required: true + schema: { type: string } + responses: + '200': + description: Task status + content: + application/json: + schema: + $ref: '#/components/schemas/AgentTaskStatusResponse' + '401': + $ref: '#/components/responses/UnauthorizedError' + '404': + $ref: '#/components/responses/NotFoundError' + + /voice/tts: + post: + summary: Text to speech + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/TTSRequest' + responses: + '200': + description: TTS result + content: + application/json: + schema: + $ref: '#/components/schemas/TTSResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /voice/command: + post: + summary: Process a voice command (text intent processing) + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/VoiceCommandRequest' + responses: + '200': + description: Intent and action + content: + application/json: + schema: + $ref: '#/components/schemas/VoiceCommandResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /voice/autopilot: + post: + summary: Control autopilot mode + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/VoiceAutopilotRequest' + responses: + '200': + description: Autopilot status + content: + application/json: + schema: + $ref: '#/components/schemas/VoiceAutopilotResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /plugins: + get: + summary: List available plugins + security: + - bearerAuth: [] + responses: + '200': + description: Plugin list + content: + application/json: + schema: + $ref: '#/components/schemas/PluginsListResponse' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /plugins/{pluginName}/execute: + post: + summary: Execute a plugin action + security: + - bearerAuth: [] + parameters: + - in: path + name: pluginName + required: true + schema: { type: string } + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PluginExecuteRequest' + responses: + '200': + description: Plugin execution result + content: + application/json: + schema: + $ref: '#/components/schemas/PluginExecuteResponse' + '400': + $ref: 
'#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + '404': + $ref: '#/components/responses/NotFoundError' + + /plugins/install: + post: + summary: Install a plugin + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PluginInstallRequest' + responses: + '200': + description: Installation result + content: + application/json: + schema: + $ref: '#/components/schemas/PluginInstallResponse' + '400': + $ref: '#/components/responses/ValidationError' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /analytics/dashboard: + get: + summary: Get dashboard analytics + security: + - bearerAuth: [] + responses: + '200': + description: Dashboard data + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsDashboardResponse' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /analytics/metrics: + get: + summary: Get system metrics timeseries + security: + - bearerAuth: [] + parameters: + - in: query + name: timeRange + schema: + type: string + enum: ["1h", "24h", "7d", "30d"] + default: "24h" + - in: query + name: metric + schema: + type: string + enum: ["requests", "performance", "errors", "users"] + responses: + '200': + description: Metrics timeseries + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsMetricsResponse' + '401': + $ref: '#/components/responses/UnauthorizedError' + + /analytics/export: + get: + summary: Export analytics data + security: + - bearerAuth: [] + parameters: + - in: query + name: format + schema: + type: string + enum: ["json", "csv"] + default: "json" + - in: query + name: timeRange + schema: + type: string + enum: ["1h", "24h", "7d", "30d"] + default: "24h" + responses: + '200': + description: File download + content: + application/octet-stream: + schema: + type: string + format: binary + '401': + $ref: '#/components/responses/UnauthorizedError' + +components: + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT + + responses: + UnauthorizedError: + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + NotFoundError: + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + ValidationError: + description: Validation error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + schemas: + ErrorResponse: + type: object + properties: + error: + type: boolean + example: true + message: + type: string + code: + type: string + timestamp: + type: string + format: date-time + requestId: + type: string + + HealthResponse: + type: object + properties: + status: + type: string + example: healthy + uptime: + type: integer + memory: + type: string + connections: + type: object + additionalProperties: { type: string } + features: + type: object + properties: + rag: { type: boolean } + voice: { type: boolean } + plugins: { type: boolean } + multiAgent: { type: boolean } + + SystemInfoResponse: + type: object + properties: + version: { type: string } + nodeVersion: { type: string } + platform: { type: string } + architecture: { type: string } + environment: { type: string } + features: + type: array + items: { type: string } + limits: + type: object + properties: + maxRequestSize: { type: string } + rateLimit: + type: object + properties: + window: { type: integer } + max: { type: integer } + + 
LoginRequest: + type: object + required: [email, password] + properties: + email: { type: string, format: email } + password: { type: string } + mfaCode: { type: string, nullable: true } + + LoginResponse: + type: object + properties: + success: { type: boolean } + accessToken: { type: string } + refreshToken: { type: string } + sessionId: { type: string } + user: + type: object + properties: + id: { type: string } + email: { type: string } + roles: + type: array + items: { type: string } + mfaEnabled: { type: boolean } + + ChatRequest: + type: object + required: [message] + properties: + message: { type: string } + optimizePrompt: { type: boolean, default: false } + context: { type: string, nullable: true } + userId: { type: string, nullable: true } + + ChatResponse: + type: object + properties: + response: { type: string } + metadata: + type: object + properties: + model: { type: string } + tokens: { type: integer } + responseTime: { type: integer } + optimized: { type: boolean } + conversationId: { type: string } + + ConversationsResponse: + type: object + properties: + conversations: + type: array + items: + type: object + properties: + id: { type: string } + userId: { type: string } + messages: + type: array + items: + type: object + properties: + role: { type: string, enum: [user, assistant, system] } + content: { type: string } + timestamp: { type: string, format: date-time } + createdAt: { type: string, format: date-time } + updatedAt: { type: string, format: date-time } + total: { type: integer } + hasMore: { type: boolean } + + RagIngestRequest: + type: object + required: [documents, collection] + properties: + documents: + type: array + items: + type: object + required: [id, content] + properties: + id: { type: string } + content: { type: string } + metadata: + type: object + additionalProperties: {} + collection: { type: string } + + RagIngestResponse: + type: object + properties: + success: { type: boolean } + ingested: { type: integer } + failed: { type: integer } + collection: { type: string } + processingTime: { type: integer } + + RagSearchRequest: + type: object + required: [query, collection] + properties: + query: { type: string } + collection: { type: string } + limit: { type: integer, default: 5 } + threshold: { type: number, format: float, nullable: true } + + RagSearchResponse: + type: object + properties: + results: + type: array + items: + type: object + properties: + id: { type: string } + content: { type: string } + score: { type: number, format: float } + metadata: + type: object + additionalProperties: {} + query: { type: string } + totalResults: { type: integer } + searchTime: { type: integer } + + RagAnswerRequest: + type: object + required: [question, collection] + properties: + question: { type: string } + collection: { type: string } + maxContext: { type: integer, default: 3 } + includeContext: { type: boolean, default: true } + + RagAnswerResponse: + type: object + properties: + answer: { type: string } + sources: + type: array + items: + type: object + properties: + id: { type: string } + title: { type: string } + relevanceScore: { type: number, format: float } + context: + type: array + items: + type: object + properties: + content: { type: string } + source: { type: string } + confidence: { type: number, format: float } + + AgentsExecuteRequest: + type: object + required: [task, agents] + properties: + task: { type: string } + agents: + type: array + items: { type: string } + parallel: { type: boolean, default: false } + maxIterations: { type: 
integer, default: 3 } + + AgentsExecuteResponse: + type: object + properties: + success: { type: boolean } + taskId: { type: string } + results: + type: array + items: + type: object + properties: + agent: { type: string } + role: { type: string } + response: { type: string } + executionTime: { type: integer } + finalSynthesis: { type: string } + totalExecutionTime: { type: integer } + confidence: { type: number, format: float } + + AgentTaskStatusResponse: + type: object + properties: + taskId: { type: string } + status: { type: string, enum: [queued, running, completed, failed] } + progress: { type: integer } + currentAgent: { type: string, nullable: true } + results: + type: array + items: { type: object } + startTime: { type: string, format: date-time } + endTime: { type: string, format: date-time, nullable: true } + + TTSRequest: + type: object + required: [text, voice, language, speed, format] + properties: + text: { type: string } + voice: { type: string, example: neural } + language: { type: string, example: en-US } + speed: { type: number, default: 1.0 } + format: { type: string, enum: [mp3, wav, ogg], default: mp3 } + + TTSResponse: + type: object + properties: + success: { type: boolean } + audioUrl: { type: string } + duration: { type: number } + format: { type: string } + size: { type: integer } + + VoiceCommandRequest: + type: object + required: [command] + properties: + command: { type: string } + context: { type: string, nullable: true } + userId: { type: string, nullable: true } + + VoiceCommandResponse: + type: object + properties: + success: { type: boolean } + command: { type: string } + intent: { type: string } + entities: + type: array + items: { type: string } + action: { type: string } + response: { type: string } + audioResponse: { type: string } + + VoiceAutopilotRequest: + type: object + required: [mode] + properties: + mode: + type: string + enum: [start, stop, status] + context: { type: string, nullable: true } + preferences: + type: object + properties: + verbosity: { type: string, enum: [low, medium, high] } + autoExecute: { type: boolean } + + VoiceAutopilotResponse: + type: object + properties: + success: { type: boolean } + mode: { type: string } + sessionId: { type: string } + status: { type: string } + capabilities: + type: array + items: { type: string } + + PluginsListResponse: + type: object + properties: + plugins: + type: array + items: + type: object + properties: + name: { type: string } + version: { type: string } + description: { type: string } + category: { type: string } + status: { type: string } + permissions: + type: array + items: { type: string } + total: { type: integer } + + PluginExecuteRequest: + type: object + required: [action, parameters] + properties: + action: { type: string } + parameters: + type: object + additionalProperties: {} + + PluginExecuteResponse: + type: object + properties: + success: { type: boolean } + plugin: { type: string } + action: { type: string } + result: + type: object + additionalProperties: {} + executionTime: { type: integer } + + PluginInstallRequest: + type: object + required: [source, package, version] + properties: + source: { type: string, enum: [npm, git, url] } + package: { type: string } + version: { type: string } + + PluginInstallResponse: + type: object + properties: + success: { type: boolean } + plugin: { type: string } + version: { type: string } + status: { type: string } + message: { type: string } + + AnalyticsDashboardResponse: + type: object + properties: + overview: + type: object + 
properties: + uptime: { type: integer } + totalRequests: { type: integer } + successRate: { type: string } + averageResponseTime: { type: integer } + activeUsers: { type: integer } + healthScore: { type: integer } + requests: + type: object + properties: + total: { type: integer } + successful: { type: integer } + failed: { type: integer } + byHour: + type: array + items: { type: integer } + topEndpoints: + type: array + items: + type: object + properties: + endpoint: { type: string } + requests: { type: integer } + averageTime: { type: integer } + errorRate: { type: number } + ai: + type: object + properties: + totalTokens: { type: integer } + totalCost: { type: number } + averageResponseTime: { type: integer } + topModels: + type: array + items: + type: object + properties: + model: { type: string } + requests: { type: integer } + tokens: { type: integer } + cost: { type: number } + + AnalyticsMetricsResponse: + type: object + properties: + timeRange: { type: string } + metrics: + type: object + additionalProperties: + type: object + properties: + timestamps: + type: array + items: { type: string, format: date-time } + values: + type: array + items: { type: number } + +x-websocket: + url: wss://your-domain.com + description: | + WebSocket API supports the following client messages: `auth`, `stream_chat`, `voice_command`, `plugin_execute`. + Server messages include: `auth_success`, `chat_response`, `stream_chunk`, `error`. \ No newline at end of file diff --git a/universal-ai-artifacts/postman/UniversalAI.postman_collection.json b/universal-ai-artifacts/postman/UniversalAI.postman_collection.json new file mode 100644 index 00000000..be36e20e --- /dev/null +++ b/universal-ai-artifacts/postman/UniversalAI.postman_collection.json @@ -0,0 +1,205 @@ +{ + "info": { + "name": "Universal AI Agent API", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_postman_id": "c0a1d6b2-1111-4a22-9c9e-abcde1234567" + }, + "variable": [ + { "key": "baseUrl", "value": "https://your-domain.com", "type": "string" }, + { "key": "token", "value": "YOUR_TOKEN", "type": "string" } + ], + "auth": { + "type": "bearer", + "bearer": [ { "key": "token", "value": "{{token}}", "type": "string" } ] + }, + "item": [ + { + "name": "Health", + "request": { "method": "GET", "url": "{{baseUrl}}/health" } + }, + { + "name": "System Info", + "request": { + "method": "GET", + "header": [ { "key": "Authorization", "value": "Bearer {{token}}" } ], + "url": "{{baseUrl}}/system/info" + } + }, + { + "name": "Auth Login", + "request": { + "method": "POST", + "header": [ { "key": "Content-Type", "value": "application/json" } ], + "url": "{{baseUrl}}/auth/login", + "body": { + "mode": "raw", + "raw": "{\n \"email\": \"user@example.com\",\n \"password\": \"secure_password\",\n \"mfaCode\": \"123456\"\n}" + } + } + }, + { + "name": "Chat", + "request": { + "method": "POST", + "header": [ + { "key": "Authorization", "value": "Bearer {{token}}" }, + { "key": "Content-Type", "value": "application/json" } + ], + "url": "{{baseUrl}}/chat", + "body": { "mode": "raw", "raw": "{\n \"message\": \"Hello, how can you help me?\",\n \"optimizePrompt\": true\n}" } + } + }, + { + "name": "Conversations", + "request": { + "method": "GET", + "header": [ { "key": "Authorization", "value": "Bearer {{token}}" } ], + "url": { + "raw": "{{baseUrl}}/conversations?limit=50&offset=0", + "host": [ "{{baseUrl}}" ], + "path": [ "conversations" ], + "query": [ + { "key": "limit", "value": "50" }, + { "key": "offset", "value": "0" } + 
]
+        }
+      }
+    },
+    {
+      "name": "RAG Ingest",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/rag/ingest",
+        "body": { "mode": "raw", "raw": "{\n  \"documents\": [\n    {\n      \"id\": \"doc1\",\n      \"content\": \"This is a sample document content...\",\n      \"metadata\": {\n        \"title\": \"Sample Document\",\n        \"author\": \"John Doe\",\n        \"category\": \"technical\"\n      }\n    }\n  ],\n  \"collection\": \"knowledge_base\"\n}" }
+      }
+    },
+    {
+      "name": "RAG Search",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/rag/search",
+        "body": { "mode": "raw", "raw": "{\n  \"query\": \"machine learning algorithms\",\n  \"collection\": \"knowledge_base\",\n  \"limit\": 5,\n  \"threshold\": 0.7\n}" }
+      }
+    },
+    {
+      "name": "RAG Answer",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/rag/answer",
+        "body": { "mode": "raw", "raw": "{\n  \"question\": \"What are the best machine learning algorithms?\",\n  \"collection\": \"knowledge_base\",\n  \"maxContext\": 3,\n  \"includeContext\": true\n}" }
+      }
+    },
+    {
+      "name": "Agents Execute",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/agents/execute",
+        "body": { "mode": "raw", "raw": "{\n  \"task\": \"Analyze the performance of our web application and suggest optimizations\",\n  \"agents\": [\"planner\", \"critic\", \"executor\"],\n  \"parallel\": false,\n  \"maxIterations\": 3\n}" }
+      }
+    },
+    {
+      "name": "Agent Task Status",
+      "request": {
+        "method": "GET",
+        "header": [ { "key": "Authorization", "value": "Bearer {{token}}" } ],
+        "url": "{{baseUrl}}/agents/task/task_123"
+      }
+    },
+    {
+      "name": "Voice TTS",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/voice/tts",
+        "body": { "mode": "raw", "raw": "{\n  \"text\": \"Hello, this is your AI assistant speaking.\",\n  \"voice\": \"neural\",\n  \"language\": \"en-US\",\n  \"speed\": 1.0,\n  \"format\": \"mp3\"\n}" }
+      }
+    },
+    {
+      "name": "Voice Command",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/voice/command",
+        "body": { "mode": "raw", "raw": "{\n  \"command\": \"analyze the latest sales data\",\n  \"context\": \"dashboard\",\n  \"userId\": \"user123\"\n}" }
+      }
+    },
+    {
+      "name": "Voice Autopilot",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/voice/autopilot",
+        "body": { "mode": "raw", "raw": "{\n  \"mode\": \"start\",\n  \"context\": \"development\",\n  \"preferences\": {\n    \"verbosity\": \"medium\",\n    \"autoExecute\": false\n  }\n}" }
+      }
+    },
+    {
+      "name": "Plugins List",
+      "request": { "method": "GET", "header": [ { "key": "Authorization", "value": "Bearer {{token}}" } ], "url": "{{baseUrl}}/plugins" }
+    },
+    {
+      "name": "Plugin Execute",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/plugins/web-scraper/execute",
+        "body": { "mode": "raw", "raw": "{\n  \"action\": \"scrape\",\n  \"parameters\": {\n    \"url\": \"https://example.com\",\n    \"selector\": \".content\",\n    \"format\": \"text\"\n  }\n}" }
+      }
+    },
+    {
+      "name": "Plugin Install",
+      "request": {
+        "method": "POST",
+        "header": [
+          { "key": "Authorization", "value": "Bearer {{token}}" },
+          { "key": "Content-Type", "value": "application/json" }
+        ],
+        "url": "{{baseUrl}}/plugins/install",
+        "body": { "mode": "raw", "raw": "{\n  \"source\": \"npm\",\n  \"package\": \"@universal-ai/plugin-example\",\n  \"version\": \"1.0.0\"\n}" }
+      }
+    },
+    {
+      "name": "Analytics Dashboard",
+      "request": { "method": "GET", "header": [ { "key": "Authorization", "value": "Bearer {{token}}" } ], "url": "{{baseUrl}}/analytics/dashboard" }
+    },
+    {
+      "name": "Analytics Metrics",
+      "request": {
+        "method": "GET",
+        "header": [ { "key": "Authorization", "value": "Bearer {{token}}" } ],
+        "url": "{{baseUrl}}/analytics/metrics?timeRange=24h&metric=requests"
+      }
+    },
+    {
+      "name": "Analytics Export",
+      "request": { "method": "GET", "header": [ { "key": "Authorization", "value": "Bearer {{token}}" } ], "url": "{{baseUrl}}/analytics/export?format=json&timeRange=24h" }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/universal-ai-artifacts/scripts/curl.sh b/universal-ai-artifacts/scripts/curl.sh
new file mode 100644
index 00000000..cea15bfa
--- /dev/null
+++ b/universal-ai-artifacts/scripts/curl.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+BASE_URL="${BASE_URL:-https://your-domain.com}"
+TOKEN="${TOKEN:-YOUR_TOKEN}"
+
+_auth_header() {
+  echo "Authorization: Bearer ${TOKEN}"
+}
+
+usage() {
+  cat <<EOF
+Usage: $0 <command>
+Commands:
+  health
+  chat <message>
+  rag_search <query>
+  plugin_execute <pluginName> <action> <paramsJson>
+  analytics_dashboard
+EOF
+}
+
+health() {
+  curl -sS "${BASE_URL}/health" | jq .
+}
+
+chat() {
+  local msg="${1:-Hello}"
+  curl -sS -X POST "${BASE_URL}/chat" \
+    -H "$(_auth_header)" -H 'Content-Type: application/json' \
+    -d "{\"message\": \"${msg}\"}" | jq .
+}
+
+rag_search() {
+  local q="${1:-machine learning}"
+  curl -sS -X POST "${BASE_URL}/rag/search" \
+    -H "$(_auth_header)" -H 'Content-Type: application/json' \
+    -d "{\"query\": \"${q}\", \"collection\": \"knowledge_base\", \"limit\": 5}" | jq .
+}
+
+plugin_execute() {
+  local plugin="${1:?pluginName}"
+  local action="${2:?action}"
+  local params="${3:-{}}"
+  curl -sS -X POST "${BASE_URL}/plugins/${plugin}/execute" \
+    -H "$(_auth_header)" -H 'Content-Type: application/json' \
+    -d "{\"action\": \"${action}\", \"parameters\": ${params}}" | jq .
+}
+
+analytics_dashboard() {
+  curl -sS "${BASE_URL}/analytics/dashboard" -H "$(_auth_header)" | jq .
+} + +cmd="${1:-}" +case "$cmd" in + health) shift; health "$@" ;; + chat) shift; chat "$@" ;; + rag_search) shift; rag_search "$@" ;; + plugin_execute) shift; plugin_execute "$@" ;; + analytics_dashboard) shift; analytics_dashboard "$@" ;; + *) usage; exit 1 ;; + esac \ No newline at end of file diff --git a/universal-ai-artifacts/utils/rate_limit.js b/universal-ai-artifacts/utils/rate_limit.js new file mode 100644 index 00000000..92d60da4 --- /dev/null +++ b/universal-ai-artifacts/utils/rate_limit.js @@ -0,0 +1,44 @@ +export async function rateLimitedFetch(fetchFn, url, options = {}, { + maxRetries = 4, + baseDelayMs = 500, + backoffFactor = 2, + maxDelayMs = 8000 +} = {}) { + let attempt = 0; + const originalBody = options.body; + + while (true) { + try { + // Re-clone simple bodies for retries + if (attempt > 0 && typeof originalBody === 'string') { + options.body = originalBody; + } + + const response = await fetchFn(url, options); + if (response.status !== 429 && response.status < 500) { + return response; // success or client error + } + + // 429 or 5xx -> retry + if (attempt >= maxRetries) { + return response; // give back last response + } + + const retryAfter = parseInt(response.headers.get('Retry-After') || '0', 10); + const delayFromHeader = Number.isFinite(retryAfter) && retryAfter > 0 ? retryAfter * 1000 : 0; + const backoff = Math.min(baseDelayMs * Math.pow(backoffFactor, attempt), maxDelayMs); + const jitter = Math.floor(Math.random() * 250); + const delayMs = Math.max(delayFromHeader, backoff) + jitter; + + await new Promise((r) => setTimeout(r, delayMs)); + attempt += 1; + continue; + } catch (err) { + if (attempt >= maxRetries) throw err; + const backoff = Math.min(baseDelayMs * Math.pow(backoffFactor, attempt), maxDelayMs); + const jitter = Math.floor(Math.random() * 250); + await new Promise((r) => setTimeout(r, backoff + jitter)); + attempt += 1; + } + } +} \ No newline at end of file diff --git a/universal-ai-artifacts/utils/rate_limit.py b/universal-ai-artifacts/utils/rate_limit.py new file mode 100644 index 00000000..6235d089 --- /dev/null +++ b/universal-ai-artifacts/utils/rate_limit.py @@ -0,0 +1,36 @@ +import time +import random +from typing import Callable, Dict, Any + + +def request_with_retries(sender: Callable[..., Any], max_retries: int = 4, base_delay: float = 0.5, backoff: float = 2.0, max_delay: float = 8.0, **kwargs) -> Any: + """ + rate-limit-safe request wrapper. Expects `sender` to be a callable like + `session.request(method, url, **kwargs)` returning a `requests.Response`. + Retries on HTTP 429 and 5xx. Honors Retry-After header when present. 
+    """
+    attempt = 0
+    while True:
+        try:
+            resp = sender(**kwargs)
+            if resp.status_code != 429 and resp.status_code < 500:
+                return resp
+            if attempt >= max_retries:
+                return resp
+            retry_after = resp.headers.get('Retry-After')
+            delay_from_header = 0.0
+            if retry_after is not None:
+                try:
+                    delay_from_header = float(retry_after)
+                except ValueError:
+                    delay_from_header = 0.0
+            backoff_delay = min(base_delay * (backoff ** attempt), max_delay)
+            delay = max(delay_from_header, backoff_delay) + random.uniform(0, 0.25)
+            time.sleep(delay)
+            attempt += 1
+        except Exception:
+            if attempt >= max_retries:
+                raise
+            backoff_delay = min(base_delay * (backoff ** attempt), max_delay)
+            time.sleep(backoff_delay + random.uniform(0, 0.25))
+            attempt += 1
\ No newline at end of file

From ad88de25e34d810ce204e87cef013f4184ab524b Mon Sep 17 00:00:00 2001
From: "Sahiix@1" <221578902+sahiixx@users.noreply.github.com>
Date: Thu, 2 Oct 2025 09:02:18 +0400
Subject: [PATCH 2/2] Update universal-ai-artifacts/openapi/openapi.yaml

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 universal-ai-artifacts/openapi/openapi.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/universal-ai-artifacts/openapi/openapi.yaml b/universal-ai-artifacts/openapi/openapi.yaml
index 01623be2..da13db69 100644
--- a/universal-ai-artifacts/openapi/openapi.yaml
+++ b/universal-ai-artifacts/openapi/openapi.yaml
@@ -741,7 +741,7 @@ components:
 
     TTSRequest:
       type: object
-      required: [text, voice, language, speed, format]
+      required: [text, voice, language]
       properties:
         text: { type: string }
         voice: { type: string, example: neural }
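A note on the spec change above: with `speed` and `format` no longer required (both have documented defaults, `1.0` and `mp3`), a minimal `/voice/tts` request only needs `text`, `voice`, and `language`. Below is a rough sketch using the bundle's retry helper, under the same assumptions as the README example (Node 18+, ES module, run from the bundle root); the field names come from the OpenAPI schemas above and are not verified against a live server.

```js
// tts_min_sketch.js — illustrative only; adjust BASE_URL/TOKEN for your deployment.
import { rateLimitedFetch } from './utils/rate_limit.js';

const BASE_URL = process.env.BASE_URL || 'https://your-domain.com';
const TOKEN = process.env.TOKEN || 'YOUR_TOKEN';

const res = await rateLimitedFetch(fetch, `${BASE_URL}/voice/tts`, {
  method: 'POST',
  headers: {
    'Authorization': `Bearer ${TOKEN}`,
    'Content-Type': 'application/json'
  },
  // Only the required fields; speed (1.0) and format (mp3) fall back to their defaults.
  body: JSON.stringify({
    text: 'Hello, this is your AI assistant speaking.',
    voice: 'neural',
    language: 'en-US'
  })
});

// Expected response shape per TTSResponse: { success, audioUrl, duration, format, size }.
console.log(await res.json());
```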