Delete server.js
server.js
DELETED
@@ -1,84 +0,0 @@
const express = require('express');
const proxy = require('express-http-proxy');
const app = express();
const bodyParser = require('body-parser');
const targetUrl = 'https://api.openai.com';
const openaiKey = process.env.OPENAI_KEY;
const proxyKey = process.env.PROXY_KEY; // Your secret proxy key
const port = 7860;
const baseUrl = getExternalUrl(process.env.SPACE_ID);
const rateLimit = require('express-rate-limit');
const requestIp = require('request-ip');

app.use(bodyParser.json({ limit: '50mb' }));

app.set('trust proxy', 1);

//app.use(requestIp.mw());
// Middleware to log the requester's IP address
//function logIPAddress(req, res, next) {
//  console.log("Requester's IP address:", req.ip);
//  next();
//}

// Apply the middleware to all requests
//app.use(logIPAddress);

// Rate limiting middleware
const limiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 50, // limit each IP to 50 requests per windowMs
  keyGenerator: (req, res) => {
    return req.ip; // req.ip, honoring 'trust proxy' (requestIp.mw() is disabled above)
  },
  handler: function (req, res, next) {
    res.status(429).json({
      message: "Too many requests. Please try again later. You can retry after 15 minutes.",
    });
  },
});

// Apply the rate limiter to all requests
app.use(limiter);

// Middleware to authenticate requests with the proxy key and check the model
function authenticateProxyKeyAndModel(req, res, next) {
  const providedKey = req.headers['auro']; // The proxy key is expected in the 'auro' header
  const requestedModel = req.body.model;

  // List of allowed models
  const allowedModels = ['gpt-3.5-turbo', 'text-moderation-latest', 'gpt-3.5-turbo-1106'];

  if (providedKey && providedKey === proxyKey && allowedModels.includes(requestedModel)) {
    // The provided key matches the expected key and the requested model is allowed, so the request may proceed
    next();
  } else {
    // The key is missing or incorrect, or the model is not allowed; reject the request
    res.status(401).json({ error: 'Unauthorized or invalid model' });
  }
}
59 |
-
|
60 |
-
|
61 |
-
app.use('/api', authenticateProxyKeyAndModel, proxy(targetUrl, {
|
62 |
-
proxyReqOptDecorator: (proxyReqOpts, srcReq) => {
|
63 |
-
// Modify the request headers if necessary
|
64 |
-
proxyReqOpts.headers['Authorization'] = 'Bearer ' + openaiKey;
|
65 |
-
return proxyReqOpts;
|
66 |
-
},
|
67 |
-
}));
|
68 |
-
|
69 |
-
app.get("/", (req, res) => {
|
70 |
-
// res.send(`This is your OpenAI Reverse Proxy URL: ${baseUrl}`);
|
71 |
-
});
|
72 |
-
|
73 |
-
function getExternalUrl(spaceId) {
|
74 |
-
try {
|
75 |
-
const [username, spacename] = spaceId.split("/");
|
76 |
-
return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space/api/v1`;
|
77 |
-
} catch (e) {
|
78 |
-
return "";
|
79 |
-
}
|
80 |
-
}
|
81 |
-
|
82 |
-
app.listen(port, () => {
|
83 |
-
console.log(`Reverse proxy server running on ${baseUrl}`);
|
84 |
-
});
|
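For reference, a request to this proxy (before it was deleted) would have looked roughly like the sketch below: the shared key travels in the 'auro' header checked by authenticateProxyKeyAndModel, and the model must be one of the allowedModels. The hostname is a placeholder for whatever getExternalUrl produced for the Space, and the message payload is purely illustrative; neither comes from this diff.

// Hypothetical client call; the hostname below is a placeholder, not taken from this diff.
(async () => {
  const response = await fetch('https://<username>-<spacename>.hf.space/api/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'auro': process.env.PROXY_KEY, // must equal the PROXY_KEY configured on the server
    },
    body: JSON.stringify({
      model: 'gpt-3.5-turbo', // must be in the server's allowedModels list
      messages: [{ role: 'user', content: 'Hello' }],
    }),
  });
  console.log(await response.json());
})();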