Created via MCP
This commit is contained in:
254
apps/ollama-mcp/mcp-prometheus/index.js
Normal file
254
apps/ollama-mcp/mcp-prometheus/index.js
Normal file
@@ -0,0 +1,254 @@
|
||||
// Dependencies: express serves the HTTP API, axios performs outbound
// requests to Prometheus and Grafana.
const express = require('express');
const axios = require('axios');

const app = express();
const port = process.env.PORT || 3000;

// Upstream endpoints, overridable via environment variables.
// NOTE(review): the Grafana default (port 3000) collides with this server's
// own default port — presumably they run in separate containers/hosts; verify.
const PROMETHEUS_URL = process.env.PROMETHEUS_URL || 'http://localhost:9090';
const GRAFANA_URL = process.env.GRAFANA_URL || 'http://localhost:3000';
const GRAFANA_TOKEN = process.env.GRAFANA_TOKEN || '';

// Parse JSON request bodies for all routes.
app.use(express.json());
|
||||
// Permissive CORS: any origin may call the API; preflight (OPTIONS)
// requests are short-circuited with a bare 200.
app.use((req, res, next) => {
  res.header('Access-Control-Allow-Origin', '*');
  res.header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS');
  res.header('Access-Control-Allow-Headers', 'Content-Type, Authorization');
  if (req.method === 'OPTIONS') {
    return res.sendStatus(200);
  }
  next();
});
|
||||
|
||||
/**
 * Run an instant PromQL query against Prometheus.
 * @param {string} query - PromQL expression.
 * @param {number|string} [time] - Optional evaluation timestamp; omitted
 *   from the request when falsy (Prometheus then evaluates at "now").
 * @returns {Promise<object>} Raw Prometheus API response body.
 */
const promQuery = async (query, time) => {
  const params = time ? { query, time } : { query };
  const response = await axios.get(`${PROMETHEUS_URL}/api/v1/query`, { params });
  return response.data;
};
|
||||
|
||||
/**
 * Run a range PromQL query against Prometheus.
 * @param {string} query - PromQL expression.
 * @param {number|string} start - Range start (unix seconds or RFC3339).
 * @param {number|string} end - Range end.
 * @param {string} [step] - Resolution step; defaults to '60s'.
 * @returns {Promise<object>} Raw Prometheus API response body.
 */
const promRangeQuery = async (query, start, end, step) => {
  const params = {
    query,
    start,
    end,
    step: step || '60s',
  };
  const response = await axios.get(`${PROMETHEUS_URL}/api/v1/query_range`, { params });
  return response.data;
};
|
||||
|
||||
// Liveness probe.
app.get('/health', (req, res) => {
  res.json({ status: 'healthy', service: 'mcp-prometheus' });
});

// Service self-description: lists every available endpoint.
app.get('/', (req, res) => {
  const endpoints = [
    'POST /api/query - Run instant PromQL query',
    'POST /api/query_range - Run range PromQL query',
    'POST /api/alerts - List firing alerts',
    'POST /api/targets - List scrape targets and health',
    'POST /api/pod_cpu - CPU usage by pod/namespace',
    'POST /api/pod_memory - Memory usage by pod/namespace',
    'POST /api/pod_restarts - Pod restart counts',
    'POST /api/node_resources - Node CPU/memory utilization',
    'POST /api/pvc_usage - PVC disk usage',
    'POST /api/http_errors - HTTP error rates (nginx ingress)',
    'POST /api/grafana_dashboards - List Grafana dashboards',
  ];
  res.json({
    service: 'MCP Prometheus/Grafana Server',
    version: '1.0.0',
    endpoints,
  });
});
|
||||
|
||||
// Raw PromQL query
|
||||
// Raw PromQL query
app.post('/api/query', async (req, res) => {
  try {
    const { query, time } = req.body;
    if (!query) {
      return res.status(400).json({ error: 'query is required' });
    }
    res.json(await promQuery(query, time));
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// Range query
|
||||
// Range query. Defaults to the last hour at 60s resolution when the caller
// omits start/end/step.
app.post('/api/query_range', async (req, res) => {
  try {
    const { query, start, end, step } = req.body;
    if (!query) return res.status(400).json({ error: 'query is required' });
    const now = Math.floor(Date.now() / 1000);
    // `??` rather than `||`: a caller-supplied start/end of 0 (the epoch) is
    // a legitimate timestamp and must not be silently replaced by the default.
    const data = await promRangeQuery(
      query,
      start ?? now - 3600,
      end ?? now,
      step ?? '60s'
    );
    res.json(data);
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// Firing alerts
|
||||
// List alerts. By default ALL alert states are returned; pass
// { firingOnly: true } to restrict the list to firing alerts.
app.post('/api/alerts', async (req, res) => {
  try {
    // Optional chaining: req.body can be undefined when no JSON body was
    // parsed; the original code threw a TypeError in that case.
    const firingOnly = req.body?.firingOnly;
    const r = await axios.get(`${PROMETHEUS_URL}/api/v1/alerts`);
    // Defensive default: tolerate a response without a data.alerts array.
    const alerts = (r.data?.data?.alerts ?? []).filter(
      (a) => a.state === 'firing' || !firingOnly
    );
    res.json({ count: alerts.length, alerts });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// Scrape targets
|
||||
// Scrape targets: summarizes every active target's job, instance, health,
// last scrape time, and last error (if any), plus an unhealthy count.
app.post('/api/targets', async (req, res) => {
  try {
    const response = await axios.get(`${PROMETHEUS_URL}/api/v1/targets`);
    const targets = response.data.data.activeTargets.map((target) => {
      const { job, instance } = target.labels;
      return {
        job,
        instance,
        health: target.health,
        lastScrape: target.lastScrape,
        lastError: target.lastError || null,
      };
    });
    const unhealthyCount = targets.filter((t) => t.health !== 'up').length;
    res.json({ total: targets.length, unhealthy: unhealthyCount, targets });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// Pod CPU usage
|
||||
// Pod CPU usage (cores), summed per pod/namespace over a rate window.
// Optional body fields: namespace (exact match), pod (prefix match),
// duration (rate window, default '5m').
app.post('/api/pod_cpu', async (req, res) => {
  try {
    const { namespace, pod, duration } = req.body;
    const dur = duration || '5m';
    // Build ONE label selector so the namespace and pod filters compose.
    // The original assigned a whole new query per filter, so supplying both
    // silently dropped the namespace restriction.
    // NOTE(review): label values are interpolated unescaped into PromQL —
    // callers are presumably trusted; confirm before exposing externally.
    const matchers = ['container!=""'];
    if (namespace) matchers.push(`namespace="${namespace}"`);
    if (pod) matchers.push(`pod=~"${pod}.*"`);
    const query = `sum(rate(container_cpu_usage_seconds_total{${matchers.join(',')}}[${dur}])) by (pod, namespace)`;
    const data = await promQuery(query);
    const results = (data.data?.result || [])
      .map((r) => ({
        pod: r.metric.pod,
        namespace: r.metric.namespace,
        cpu_cores: parseFloat(r.value[1]).toFixed(4),
      }))
      // Numeric subtraction coerces the toFixed strings back to numbers,
      // giving a descending sort by CPU.
      .sort((a, b) => b.cpu_cores - a.cpu_cores);
    res.json({ count: results.length, results });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// Pod memory usage
|
||||
// Pod memory usage (working set, MB), summed per pod/namespace.
// Optional body fields: namespace (exact match), pod (prefix match).
app.post('/api/pod_memory', async (req, res) => {
  try {
    const { namespace, pod } = req.body;
    // Compose a single selector so namespace and pod filters can be used
    // together; the original overwrote the query per filter, so a request
    // with both silently ignored the namespace.
    const matchers = ['container!=""'];
    if (namespace) matchers.push(`namespace="${namespace}"`);
    if (pod) matchers.push(`pod=~"${pod}.*"`);
    const query = `sum(container_memory_working_set_bytes{${matchers.join(',')}}) by (pod, namespace)`;
    const data = await promQuery(query);
    const results = (data.data?.result || [])
      .map((r) => ({
        pod: r.metric.pod,
        namespace: r.metric.namespace,
        memory_mb: (parseFloat(r.value[1]) / 1024 / 1024).toFixed(1),
      }))
      // Subtraction coerces the toFixed strings to numbers: descending sort.
      .sort((a, b) => b.memory_mb - a.memory_mb);
    res.json({ count: results.length, results });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// Pod restarts
|
||||
// Pod restart counts from kube-state-metrics, filtered to counts strictly
// above `threshold` (default 0) and sorted descending.
app.post('/api/pod_restarts', async (req, res) => {
  try {
    const { namespace, threshold } = req.body;
    let query = `sum(kube_pod_container_status_restarts_total) by (pod, namespace, container)`;
    if (namespace) query = `sum(kube_pod_container_status_restarts_total{namespace="${namespace}"}) by (pod, namespace, container)`;
    const data = await promQuery(query);
    const min = threshold || 0;
    const results = (data.data?.result || [])
      .map((r) => ({
        pod: r.metric.pod,
        namespace: r.metric.namespace,
        container: r.metric.container,
        // Explicit radix: parseInt without one is an idiom hazard.
        restarts: Number.parseInt(r.value[1], 10),
      }))
      .filter((r) => r.restarts > min)
      .sort((a, b) => b.restarts - a.restarts);
    res.json({ count: results.length, results });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// Node resource utilization
|
||||
// Node resource utilization: CPU, memory, and root-filesystem usage as
// percentages, merged per node.
app.post('/api/node_resources', async (req, res) => {
  try {
    // The three queries are independent — run them in parallel instead of
    // awaiting each one sequentially.
    const [cpuData, memData, diskData] = await Promise.all([
      promQuery(`100 - (avg by(node) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100)`),
      promQuery(`(1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)) * 100`),
      promQuery(`(1 - (node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"})) * 100`),
    ]);

    // Keyed by node label (fallback: instance).
    // NOTE(review): the CPU series is keyed `node || instance` while the
    // memory/disk series use `instance || node`; if the exporters emit
    // different labels the merge below may never attach memory/disk values.
    // Verify against the actual relabeling config.
    const nodeMap = {};
    (cpuData.data?.result || []).forEach((r) => {
      const node = r.metric.node || r.metric.instance;
      nodeMap[node] = { node, cpu_percent: parseFloat(r.value[1]).toFixed(1) };
    });
    (memData.data?.result || []).forEach((r) => {
      const node = r.metric.instance || r.metric.node;
      if (nodeMap[node]) nodeMap[node].memory_percent = parseFloat(r.value[1]).toFixed(1);
    });
    (diskData.data?.result || []).forEach((r) => {
      const node = r.metric.instance || r.metric.node;
      if (nodeMap[node]) nodeMap[node].disk_percent = parseFloat(r.value[1]).toFixed(1);
    });

    res.json({ nodes: Object.values(nodeMap) });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// PVC usage
|
||||
// PVC usage: used/capacity percentage per PersistentVolumeClaim, sorted
// descending. Optional body field: namespace.
app.post('/api/pvc_usage', async (req, res) => {
  try {
    const { namespace } = req.body;
    // Apply the same selector to both sides of the division (or none at all).
    const selector = namespace ? `{namespace="${namespace}"}` : '';
    const query = `(kubelet_volume_stats_used_bytes${selector} / kubelet_volume_stats_capacity_bytes${selector}) * 100`;
    const data = await promQuery(query);
    const results = (data.data?.result || [])
      .map((sample) => ({
        pvc: sample.metric.persistentvolumeclaim,
        namespace: sample.metric.namespace,
        used_percent: parseFloat(sample.value[1]).toFixed(1),
      }))
      .sort((a, b) => b.used_percent - a.used_percent);
    res.json({ count: results.length, results });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// HTTP error rates (nginx ingress)
|
||||
// HTTP error rates (nginx ingress): 5xx requests per second, grouped by
// ingress/namespace/status, sorted descending by rate.
// Optional body field: duration (rate window, default '5m').
app.post('/api/http_errors', async (req, res) => {
  try {
    const dur = req.body.duration || '5m';
    const query = `sum(rate(nginx_ingress_controller_requests{status=~"5.."}[${dur}])) by (ingress, namespace, status)`;
    const data = await promQuery(query);
    const results = (data.data?.result || [])
      .map((sample) => {
        const { ingress, namespace, status } = sample.metric;
        return {
          ingress,
          namespace,
          status,
          rps: parseFloat(sample.value[1]).toFixed(4),
        };
      })
      .sort((a, b) => b.rps - a.rps);
    res.json({ count: results.length, results });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// List Grafana dashboards
|
||||
// List Grafana dashboards via the search API. Sends a bearer token only
// when GRAFANA_TOKEN is configured.
app.post('/api/grafana_dashboards', async (req, res) => {
  try {
    const headers = {};
    if (GRAFANA_TOKEN) {
      headers.Authorization = `Bearer ${GRAFANA_TOKEN}`;
    }
    const r = await axios.get(`${GRAFANA_URL}/api/search?type=dash-db`, { headers });
    const dashboards = r.data.map(({ uid, title, url, tags }) => ({ uid, title, url, tags }));
    res.json({ count: dashboards.length, dashboards });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
|
||||
|
||||
// Catch-all for unmatched routes.
app.use((req, res) => {
  res.status(404).json({ error: 'Not found' });
});

// Bind on all interfaces and announce the configured upstreams.
app.listen(port, '0.0.0.0', () => {
  console.log(`MCP Prometheus Server running on port ${port}`);
  console.log(`Prometheus: ${PROMETHEUS_URL}`);
  console.log(`Grafana: ${GRAFANA_URL}`);
});
|
||||
Reference in New Issue
Block a user