import express from 'express';
import { OpenaiRes } from '../lib/scrapper.js';
import { NvidiaTogether } from '../lib/@randydev/together/llama.js';
import { CohereAI } from '../lib/@randydev/together/cohere.js';
import { authenticateApiKey, authenticateApiKeyPremium, apiLimiter } from '../middleware/midware.js';

const GptRoutes = express.Router();
/**
 * @swagger
 * /api/v1/ai/cohere/command-plus:
 *   get:
 *     summary: Cohere AI
 *     tags: [AI]
 *     parameters:
 *       - in: query
 *         name: query
 *         required: true
 *         description: The query to be processed by Cohere AI.
 *         schema:
 *           type: string
 *       - in: query
 *         name: system_prompt
 *         required: false
 *         description: Optional system prompt that overrides the default one.
 *         schema:
 *           type: string
 *       - in: header
 *         name: x-api-key
 *         required: true
 *         description: API key for authentication
 *         schema:
 *           type: string
 *     responses:
 *       200:
 *         description: Success
 */
GptRoutes.get('/api/v1/ai/cohere/command-plus', authenticateApiKeyPremium, apiLimiter, async (req, res) => {
    try {
        const query = req.query.query;
        // Default system prompt, used when the caller does not pass system_prompt.
        let system_prompt = "Your name is AkenoX AI, a kind and friendly AI assistant that answers in\na short and concise manner. Give short step-by-step reasoning if required.\n";
        system_prompt = req.query.system_prompt ? req.query.system_prompt : system_prompt;
        const results = await CohereAI(query, {
            system_prompt: system_prompt
        });
        res.json({ results });
    } catch (error) {
        res.status(500).json({ error: error.message });
    }
});
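/*
 * Example client call for this route (a sketch only; BASE_URL and the
 * x-api-key value are placeholders, not defined anywhere in this project):
 *
 *   const res = await fetch(
 *     `${BASE_URL}/api/v1/ai/cohere/command-plus?query=${encodeURIComponent('Hello')}`,
 *     { headers: { 'x-api-key': 'YOUR_PREMIUM_API_KEY' } }
 *   );
 *   const { results } = await res.json();
 */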
/**
 * @swagger
 * /api/v1/ai/nvidia/llama-31-70b:
 *   get:
 *     summary: Nvidia LLama AI
 *     tags: [AI]
 *     parameters:
 *       - in: query
 *         name: query
 *         required: true
 *         description: The query to be processed by the Nvidia LLama AI.
 *         schema:
 *           type: string
 *       - in: query
 *         name: system_prompt
 *         required: false
 *         description: Optional system prompt that overrides the default one.
 *         schema:
 *           type: string
 *       - in: header
 *         name: x-api-key
 *         required: true
 *         description: API key for authentication
 *         schema:
 *           type: string
 *     responses:
 *       200:
 *         description: Success
 */
GptRoutes.get('/api/v1/ai/nvidia/llama-31-70b', authenticateApiKeyPremium, apiLimiter, async (req, res) => {
    try {
        const query = req.query.query;
        // Default system prompt, used when the caller does not pass system_prompt.
        let system_prompt = "Your name is AkenoX AI, a kind and friendly AI assistant that answers in\na short and concise manner. Give short step-by-step reasoning if required.\n";
        system_prompt = req.query.system_prompt ? req.query.system_prompt : system_prompt;
        const results = await NvidiaTogether(query, {
            system_prompt: system_prompt
        });
        res.json({ results });
    } catch (error) {
        res.status(500).json({ error: error.message });
    }
});
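/*
 * Example client call with a custom system_prompt (a sketch only; BASE_URL,
 * the x-api-key value, and the prompt text are placeholders, not defined in
 * this project):
 *
 *   const params = new URLSearchParams({ query: 'Hello', system_prompt: 'Answer briefly.' });
 *   const res = await fetch(
 *     `${BASE_URL}/api/v1/ai/nvidia/llama-31-70b?${params}`,
 *     { headers: { 'x-api-key': 'YOUR_PREMIUM_API_KEY' } }
 *   );
 *   const { results } = await res.json();
 */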
/**
 * @swagger
 * /api/v1/ai/gpt-old:
 *   get:
 *     summary: GPT OLD version turbo
 *     tags: [AI]
 *     parameters:
 *       - in: query
 *         name: query
 *         required: true
 *         description: The query to be processed by the GPT OLD.
 *         schema:
 *           type: string
 *       - in: header
 *         name: x-api-key
 *         required: true
 *         description: API key for authentication
 *         schema:
 *           type: string
 *     responses:
 *       200:
 *         description: Success
 */
GptRoutes.get('/api/v1/ai/gpt-old', authenticateApiKey, apiLimiter, async (req, res) => {
    try {
        const query = req.query.query;
        const results = await OpenaiRes(query);
        res.json({ results });
    } catch (error) {
        res.status(500).json({ error: error.message });
    }
});
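/*
 * Example client call (a sketch only; BASE_URL and the x-api-key value are
 * placeholders). Note this route is guarded by authenticateApiKey rather than
 * the premium middleware used above:
 *
 *   const res = await fetch(
 *     `${BASE_URL}/api/v1/ai/gpt-old?query=${encodeURIComponent('Hello')}`,
 *     { headers: { 'x-api-key': 'YOUR_API_KEY' } }
 *   );
 *   const { results } = await res.json();
 */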
export { GptRoutes };