Initial agentic-os GitOps scaffold; Argo sources point at Gitea

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
Deep Koluguri 2026-05-11 11:33:48 -04:00
commit d0f2492e3f
90 changed files with 2064 additions and 0 deletions

24
.gitignore vendored Normal file
View File

@ -0,0 +1,24 @@
# Python
__pycache__/
*.py[cod]
.venv/
venv/
*.egg-info/
dist/
build/
# Node
node_modules/
.next/
out/
# IDE / OS
.idea/
.vscode/
.DS_Store
Thumbs.db
# Secrets (never commit)
*.pem
.env
.env.*

View File

@ -0,0 +1,15 @@
FROM python:3.11-slim
ENV PYTHONDONTWRITEBYTECODE=1 PYTHONUNBUFFERED=1 PIP_NO_CACHE_DIR=1
RUN apt-get update \
&& apt-get install -y --no-install-recommends curl ca-certificates \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /opt/agent
RUN pip install --no-cache-dir \
"langgraph>=0.2.0" \
"langchain-core>=0.3.0" \
"langchain-openai>=0.2.0" \
"litellm>=1.50.0" \
"mcp[cli]>=1.2.0" \
"psycopg[binary]>=3.2.0" \
"langgraph-checkpoint-postgres>=2.0.0"
COPY README.agent.txt /opt/agent/README.agent.txt

View File

@ -0,0 +1 @@
Agent base image: Python 3.11 with LangGraph, LangChain MCP adapters, LiteLLM client libraries, and Postgres drivers for AsyncPostgresSaver checkpointers.

View File

@ -0,0 +1 @@
"""Bernard dev/PR agent (scaffold)."""

View File

@ -0,0 +1,6 @@
def main() -> None:
    """Entry point for the Bernard dev/PR agent scaffold.

    The agent is a placeholder: invoking it always terminates the process
    with a non-zero status and an explanatory message.
    """
    message = "Bernard agent is not implemented yet."
    raise SystemExit(message)


if __name__ == "__main__":
    main()

10
agents/gumbo/Dockerfile Normal file
View File

@ -0,0 +1,10 @@
FROM python:3.11-slim
ENV PYTHONDONTWRITEBYTECODE=1 PYTHONUNBUFFERED=1 PIP_NO_CACHE_DIR=1 PYTHONPATH=/app
WORKDIR /app
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates \
&& rm -rf /var/lib/apt/lists/*
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt
COPY gumbo /app/gumbo
CMD ["python", "-m", "gumbo.run"]

View File

@ -0,0 +1,10 @@
FROM python:3.11-slim
ENV PYTHONDONTWRITEBYTECODE=1 PYTHONUNBUFFERED=1 PIP_NO_CACHE_DIR=1 PYTHONPATH=/app
WORKDIR /app
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates \
&& rm -rf /var/lib/apt/lists/*
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt
COPY gumbo /app/gumbo
CMD ["python", "-m", "gumbo.temporal.worker"]

View File

@ -0,0 +1 @@
"""Gumbo summarization agent (LangGraph + Postgres checkpointer)."""

View File

@ -0,0 +1,63 @@
"""LangGraph definition for Gumbo (load from MCP FS -> summarize via LiteLLM)."""
from __future__ import annotations
import os
from typing import TypedDict
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
from langgraph.graph import END, StateGraph
from gumbo.mcp_fs import fetch_text_via_mcp
class GumboState(TypedDict):
    """Shared LangGraph state for the Gumbo summarization graph."""

    # MinIO object key of the document to summarize (graph input).
    object_key: str
    # Raw document text, populated by the "load" node via MCP.
    source_text: str
    # Markdown summary, populated by the "summarize" node (graph output).
    summary: str
async def _load(state: GumboState) -> dict:
    """Graph node: fetch the source document text from the MCP filesystem server."""
    document_text = await fetch_text_via_mcp(state["object_key"])
    return {"source_text": document_text}
async def _summarize(state: GumboState) -> dict:
    """Graph node: summarize the loaded document text through the LiteLLM proxy.

    Reads LITELLM_API_KEY / LITELLM_BASE_URL (required) and GUMBO_LLM_MODEL
    (optional, defaults to "ollama-qwen") from the environment.
    """
    model_name = os.getenv("GUMBO_LLM_MODEL", "ollama-qwen")
    client = ChatOpenAI(
        model=model_name,
        api_key=os.environ["LITELLM_API_KEY"],
        base_url=os.environ["LITELLM_BASE_URL"],
        temperature=0.2,
    )
    messages = [
        SystemMessage(
            content="You are Gumbo, a precise documentation summarizer. Return a concise markdown summary."
        ),
        HumanMessage(
            content=f"Summarize the following document:\n\n{state['source_text']}",
        ),
    ]
    response = await client.ainvoke(messages)
    # Some providers return structured content; coerce anything non-str.
    if isinstance(response.content, str):
        summary_text = response.content
    else:
        summary_text = str(response.content)
    return {"summary": summary_text}
def build_graph_builder() -> StateGraph:
    """Assemble the linear Gumbo graph: load -> summarize -> END (uncompiled)."""
    graph = StateGraph(GumboState)
    graph.add_node("load", _load)
    graph.add_node("summarize", _summarize)
    graph.set_entry_point("load")
    graph.add_edge("load", "summarize")
    graph.add_edge("summarize", END)
    return graph
async def run_gumbo(object_key: str, thread_id: str, conn_string: str) -> GumboState:
    """Run the Gumbo graph once with a Postgres-backed checkpointer.

    The checkpointer tables are (idempotently) set up before the run, and the
    given thread_id namespaces the checkpoints for this invocation.
    """
    initial_state: GumboState = {
        "object_key": object_key,
        "source_text": "",
        "summary": "",
    }
    run_config = {"configurable": {"thread_id": thread_id}}
    async with AsyncPostgresSaver.from_conn_string(conn_string) as saver:
        await saver.setup()
        compiled = build_graph_builder().compile(checkpointer=saver)
        return await compiled.ainvoke(initial_state, config=run_config)

View File

@ -0,0 +1,17 @@
"""Human-in-the-loop gate for high-risk tool names (Temporal approval pattern).
Tool names matching ``(push|apply|send|delete)`` should not execute inside the agent
process without an explicit Temporal resume signal. In LangGraph, prefer ``interrupt``
from ``langgraph.types`` so the checkpoint captures the pending decision; the Temporal
worker should treat an ``interrupt`` payload as a workflow wait + external approval API.
"""
from __future__ import annotations
import re
_DANGEROUS = re.compile(r"(push|apply|send|delete)", re.IGNORECASE)
def tool_name_requires_temporal_approval(tool_name: str) -> bool:
return bool(_DANGEROUS.search(tool_name))

View File

@ -0,0 +1,24 @@
"""Async MCP client helpers for the filesystem (MinIO) server."""
from __future__ import annotations
import os
from mcp import ClientSession
from mcp.client.sse import sse_client
from mcp.types import TextContent
async def fetch_text_via_mcp(object_key: str) -> str:
    """Fetch an object's text from the MCP filesystem (MinIO) server.

    Opens an SSE session against the URL in ``MCP_FS_SSE_URL``, calls the
    ``get_object_text`` tool with the given key, and concatenates every
    ``TextContent`` block in the reply.

    Raises:
        KeyError: if ``MCP_FS_SSE_URL`` is not set.
        RuntimeError: if the MCP tool call reports an error.
    """
    url = os.environ["MCP_FS_SSE_URL"]
    async with sse_client(url) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            result = await session.call_tool("get_object_text", {"key": object_key})
            if result.isError:
                # Fix: surface the error as readable text rather than the repr
                # of the raw content-block list.
                detail = " ".join(
                    block.text for block in result.content if isinstance(block, TextContent)
                ) or str(result.content)
                raise RuntimeError(
                    f"MCP tool get_object_text failed for {object_key!r}: {detail}"
                )
            return "".join(
                block.text for block in result.content if isinstance(block, TextContent)
            )

30
agents/gumbo/gumbo/run.py Normal file
View File

@ -0,0 +1,30 @@
"""Job entrypoint: run the Gumbo LangGraph once and print the summary to stdout."""
from __future__ import annotations
import asyncio
import json
import os
import sys
from gumbo.graph import run_gumbo
async def _main() -> int:
    """Run the Gumbo graph once and emit a single JSON result line on stdout.

    Required env: GUMBO_OBJECT_KEY, LANGGRAPH_CHECKPOINT_URI.
    Optional env: GUMBO_THREAD_ID (defaults to "gumbo-default-thread").

    Returns:
        Process exit code (always 0 on success; failures propagate as exceptions).
    """
    object_key = os.environ["GUMBO_OBJECT_KEY"]
    thread_id = os.environ.get("GUMBO_THREAD_ID", "gumbo-default-thread")
    conn = os.environ["LANGGRAPH_CHECKPOINT_URI"]
    result = await run_gumbo(object_key, thread_id, conn)
    payload = {"summary": result["summary"], "object_key": object_key, "thread_id": thread_id}
    # Fix: newline-terminate so line-oriented log collectors see one complete
    # JSON line; json.loads-based consumers tolerate the trailing whitespace.
    sys.stdout.write(json.dumps(payload) + "\n")
    sys.stdout.flush()
    return 0
def main() -> None:
    """Synchronous wrapper: drive the async entrypoint and exit with its code."""
    exit_code = asyncio.run(_main())
    raise SystemExit(exit_code)


if __name__ == "__main__":
    main()

View File

@ -0,0 +1 @@
"""Temporal workflow/activity entrypoints for Gumbo."""

View File

@ -0,0 +1,51 @@
from __future__ import annotations
import json
import os
from typing import Any
import psycopg
from temporalio import activity
@activity.defn
async def run_gumbo_langgraph_job(params: dict[str, Any]) -> dict[str, Any]:
    """Temporal activity: execute the Gumbo LangGraph once for one object key.

    Expects ``params`` with ``object_key`` and ``thread_id``; returns those
    plus the generated ``summary``.
    """
    # Imported lazily (same as the original) so the Temporal workflow sandbox
    # never has to load LangGraph code at module-import time.
    from gumbo.graph import run_gumbo

    checkpoint_uri = os.environ["LANGGRAPH_CHECKPOINT_URI"]
    object_key = params["object_key"]
    thread_id = params["thread_id"]
    final_state = await run_gumbo(object_key, thread_id, checkpoint_uri)
    return {
        "summary": final_state["summary"],
        "object_key": object_key,
        "thread_id": thread_id,
    }
@activity.defn
async def persist_gumbo_summary(payload: dict[str, Any]) -> None:
    """Temporal activity: durably write a finished summary to Postgres.

    Expects ``payload`` to carry ``workflow_id``, ``object_key`` and
    ``summary`` keys. Creates the results table if needed, then inserts
    one row. Connects via the DSN in ``GUMBO_RESULTS_DSN``.

    Raises:
        KeyError: if ``GUMBO_RESULTS_DSN`` or a payload key is missing.
    """
    dsn = os.environ["GUMBO_RESULTS_DSN"]
    conn = await psycopg.AsyncConnection.connect(dsn)
    try:
        async with conn.cursor() as cur:
            # Idempotent DDL: safe to run on every activity execution/retry.
            await cur.execute(
                """
                CREATE TABLE IF NOT EXISTS gumbo_summaries (
                    id bigserial PRIMARY KEY,
                    workflow_id text NOT NULL,
                    object_key text NOT NULL,
                    summary text NOT NULL,
                    created_at timestamptz NOT NULL DEFAULT now()
                );
                """
            )
            await cur.execute(
                """
                INSERT INTO gumbo_summaries (workflow_id, object_key, summary)
                VALUES (%s, %s, %s);
                """,
                (payload["workflow_id"], payload["object_key"], payload["summary"]),
            )
        # Commit explicitly; closing without commit would roll the insert back.
        await conn.commit()
    finally:
        await conn.close()
@activity.defn
async def parse_gumbo_job_logs(job_log_bytes: bytes) -> dict[str, Any]:
    """Temporal activity: decode captured Job stdout (UTF-8 JSON) into a dict."""
    decoded = job_log_bytes.decode("utf-8")
    return json.loads(decoded)

View File

@ -0,0 +1,37 @@
from __future__ import annotations
import asyncio
import os
from temporalio.client import Client
from temporalio.worker import Worker
from gumbo.temporal import activities
from gumbo.temporal.workflows import GumboApprovalGateWorkflow, GumboSummarizeWorkflow
async def _main() -> None:
    """Connect to Temporal and serve Gumbo workflows/activities until stopped.

    Connection parameters come from TEMPORAL_ADDRESS, TEMPORAL_NAMESPACE and
    GUMBO_TASK_QUEUE, each with an in-cluster default.
    """
    address = os.environ.get(
        "TEMPORAL_ADDRESS", "temporal-frontend.ai-core.svc.cluster.local:7233"
    )
    namespace = os.environ.get("TEMPORAL_NAMESPACE", "default")
    task_queue = os.environ.get("GUMBO_TASK_QUEUE", "gumbo")

    client = await Client.connect(address, namespace=namespace)
    registered_workflows = [GumboSummarizeWorkflow, GumboApprovalGateWorkflow]
    registered_activities = [
        activities.run_gumbo_langgraph_job,
        activities.persist_gumbo_summary,
        activities.parse_gumbo_job_logs,
    ]
    worker = Worker(
        client,
        task_queue=task_queue,
        workflows=registered_workflows,
        activities=registered_activities,
    )
    await worker.run()


def main() -> None:
    """Synchronous entrypoint for ``python -m gumbo.temporal.worker``."""
    asyncio.run(_main())


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,50 @@
from __future__ import annotations
from datetime import timedelta
from temporalio import workflow
with workflow.unsafe.imports_passed_through():
from gumbo.temporal.activities import persist_gumbo_summary, run_gumbo_langgraph_job
@workflow.defn
class GumboSummarizeWorkflow:
    """Orchestrates Gumbo: LangGraph run (MCP + LiteLLM + Postgres checkpoints) then durable write."""

    @workflow.run
    async def run(self, object_key: str) -> str:
        """Summarize the object at ``object_key`` and persist the result.

        Returns the generated summary text.
        """
        # Reuse the workflow id as the LangGraph thread id so checkpoints
        # line up one-to-one with workflow executions.
        wf_id = workflow.info().workflow_id
        params = {"object_key": object_key, "thread_id": wf_id}
        # Long timeout: the LangGraph run includes an LLM call chain.
        out = await workflow.execute_activity(
            run_gumbo_langgraph_job,
            params,
            start_to_close_timeout=timedelta(minutes=30),
        )
        # Persist only after a summary exists; Temporal retries this step
        # independently of the (expensive) LangGraph activity above.
        await workflow.execute_activity(
            persist_gumbo_summary,
            {
                "workflow_id": wf_id,
                "object_key": out["object_key"],
                "summary": out["summary"],
            },
            start_to_close_timeout=timedelta(minutes=5),
        )
        return out["summary"]
@workflow.defn
class GumboApprovalGateWorkflow:
    """Example pattern: block until an external approval signal arrives."""

    def __init__(self) -> None:
        # Flipped by the `approve` signal; `run` blocks on it.
        self._approval_received = False

    @workflow.signal
    def approve(self) -> None:
        """Signal handler: mark this workflow as approved."""
        self._approval_received = True

    @workflow.run
    async def run(self) -> str:
        """Wait (durably) for approval, then return the literal "approved"."""
        await workflow.wait_condition(lambda: self._approval_received)
        return "approved"

View File

@ -0,0 +1,7 @@
langgraph>=0.2.0
langgraph-checkpoint-postgres>=2.0.0
langchain-core>=0.3.0
langchain-openai>=0.2.0
mcp[cli]>=1.2.0
psycopg[binary]>=3.2.0
temporalio>=1.8.0

View File

@ -0,0 +1,66 @@
# Template for a Gumbo Kubernetes Job. Replace REPLACE_IMAGE after building agents/gumbo.
apiVersion: batch/v1
kind: Job
metadata:
name: gumbo-run
namespace: ai-agents-gumbo
spec:
suspend: true
ttlSecondsAfterFinished: 86400
backoffLimit: 0
template:
metadata:
labels:
app.kubernetes.io/name: gumbo-job
spec:
restartPolicy: Never
containers:
- name: gumbo
image: REPLACE_IMAGE
imagePullPolicy: IfNotPresent
env:
- name: GUMBO_OBJECT_KEY
value: "REPLACE_OBJECT_KEY"
- name: GUMBO_THREAD_ID
valueFrom:
fieldRef:
fieldPath: metadata.uid
- name: MCP_FS_SSE_URL
value: "http://mcp-filesystem.tools-mcp.svc.cluster.local:8080/sse"
- name: LITELLM_BASE_URL
value: "http://litellm.ai-core.svc.cluster.local:4000/v1"
- name: LITELLM_API_KEY
valueFrom:
secretKeyRef:
name: gumbo-litellm
key: api_key
- name: LANGGRAPH_CHECKPOINT_URI
valueFrom:
secretKeyRef:
name: gumbo-checkpoint-db
key: uri
resources:
requests:
cpu: "1"
memory: 2Gi
limits:
cpu: "4"
memory: 8Gi
---
apiVersion: v1
kind: Secret
metadata:
name: gumbo-litellm
namespace: ai-agents-gumbo
type: Opaque
stringData:
api_key: change-me-litellm-master
---
apiVersion: v1
kind: Secret
metadata:
name: gumbo-checkpoint-db
namespace: ai-agents-gumbo
type: Opaque
stringData:
uri: postgresql://agentic_os:change-me@agentic-os-pg-rw.platform-data.svc.cluster.local:5432/gumbo?sslmode=disable

View File

@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- gumbo-job-template.yaml

View File

@ -0,0 +1,46 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://agentic-os.local/schemas/agent-message.json",
"title": "AgentMessage",
"type": "object",
"required": ["schema_version", "message_id", "correlation_id", "from_agent", "payload"],
"properties": {
"schema_version": {
"type": "string",
"const": "1.0"
},
"message_id": {
"type": "string",
"format": "uuid"
},
"correlation_id": {
"type": "string",
"description": "Temporal workflow run id or trace id"
},
"thread_id": {
"type": "string",
"description": "LangGraph thread / checkpoint namespace"
},
"from_agent": {
"type": "string",
"pattern": "^[a-z0-9-]+$"
},
"to_agent": {
"type": ["string", "null"],
"pattern": "^[a-z0-9-]+$"
},
"kind": {
"type": "string",
"enum": ["task", "result", "error", "approval_request", "approval_response"]
},
"payload": {
"type": "object",
"additionalProperties": true
},
"metadata": {
"type": "object",
"additionalProperties": true
}
},
"additionalProperties": false
}

View File

@ -0,0 +1,17 @@
FROM python:3.11-slim
ENV PYTHONDONTWRITEBYTECODE=1 PYTHONUNBUFFERED=1 PIP_NO_CACHE_DIR=1
RUN apt-get update \
&& apt-get install -y --no-install-recommends curl ca-certificates \
&& rm -rf /var/lib/apt/lists/*
ARG KUBECTL_VERSION=v1.30.2
RUN curl -fsSL "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl" -o /usr/local/bin/kubectl \
&& chmod +x /usr/local/bin/kubectl
RUN pip install --no-cache-dir \
"temporalio>=1.8.0" \
"kubernetes>=30.0.0" \
"psycopg[binary]>=3.2.0" \
"langgraph>=0.2.0" \
"langgraph-checkpoint-postgres>=2.0.0" \
"langchain-openai>=0.2.0" \
"mcp[cli]>=1.2.0"
WORKDIR /worker

View File

@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- litellm/service.yaml
- litellm/deployment.yaml
- litellm/configmap.yaml
- litellm/stub-secrets.yaml
- ollama/service.yaml
- ollama/statefulset.yaml

View File

@ -0,0 +1,28 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: litellm-config
namespace: ai-core
data:
config.yaml: |
model_list:
- model_name: ollama-qwen
litellm_params:
model: ollama/qwen2.5:14b
api_base: http://ollama.ai-core.svc.cluster.local:11434
- model_name: ollama-gemma
litellm_params:
model: ollama/gemma:7b
api_base: http://ollama.ai-core.svc.cluster.local:11434
router_settings:
routing_strategy: simple-shuffle
num_retries: 2
timeout: 120
allowed_fails: 2
context_window_fallbacks:
- ollama-qwen
- ollama-gemma
litellm_settings:
max_budget: 5
budget_duration: 1d
# Virtual keys / teams: grant dev-agent keys a $20/day budget via the LiteLLM admin UI, or use a separate deployment profile.

View File

@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: litellm
namespace: ai-core
labels:
app.kubernetes.io/name: litellm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: litellm
template:
metadata:
labels:
app.kubernetes.io/name: litellm
spec:
containers:
- name: litellm
image: ghcr.io/berriai/litellm:main-v1.55.10
args:
- "--config"
- "/etc/litellm/config.yaml"
ports:
- containerPort: 4000
name: http
env:
- name: LITELLM_MASTER_KEY
valueFrom:
secretKeyRef:
name: litellm-master
key: LITELLM_MASTER_KEY
- name: LANGFUSE_PUBLIC_KEY
valueFrom:
secretKeyRef:
name: litellm-langfuse
key: public_key
optional: true
- name: LANGFUSE_SECRET_KEY
valueFrom:
secretKeyRef:
name: litellm-langfuse
key: secret_key
optional: true
- name: LANGFUSE_HOST
value: "http://langfuse.observability.svc.cluster.local:3000"
volumeMounts:
- name: config
mountPath: /etc/litellm
readOnly: true
resources:
requests:
cpu: 250m
memory: 512Mi
limits:
cpu: "2"
memory: 2Gi
volumes:
- name: config
configMap:
name: litellm-config

View File

@ -0,0 +1,41 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: litellm-master-key
namespace: ai-core
spec:
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: infisical
target:
name: litellm-master
creationPolicy: Owner
data:
- secretKey: LITELLM_MASTER_KEY
remoteRef:
key: /agentic-os/litellm
property: master_key
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: litellm-langfuse-keys
namespace: ai-core
spec:
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: infisical
target:
name: litellm-langfuse
creationPolicy: Owner
data:
- secretKey: public_key
remoteRef:
key: /agentic-os/langfuse
property: public_key
- secretKey: secret_key
remoteRef:
key: /agentic-os/langfuse
property: secret_key

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: litellm
namespace: ai-core
labels:
app.kubernetes.io/name: litellm
spec:
selector:
app.kubernetes.io/name: litellm
ports:
- name: http
port: 4000
targetPort: 4000

View File

@ -0,0 +1,19 @@
# Replace via ExternalSecrets in production. Required keys for the bundled Deployment manifests.
apiVersion: v1
kind: Secret
metadata:
name: litellm-master
namespace: ai-core
type: Opaque
stringData:
LITELLM_MASTER_KEY: change-me-litellm-master
---
apiVersion: v1
kind: Secret
metadata:
name: litellm-langfuse
namespace: ai-core
type: Opaque
stringData:
public_key: ""
secret_key: ""

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: ollama
namespace: ai-core
labels:
app.kubernetes.io/name: ollama
spec:
clusterIP: None
selector:
app.kubernetes.io/name: ollama
ports:
- name: http
port: 11434
targetPort: 11434

View File

@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: ollama
namespace: ai-core
labels:
app.kubernetes.io/name: ollama
spec:
serviceName: ollama
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: ollama
template:
metadata:
labels:
app.kubernetes.io/name: ollama
spec:
containers:
- name: ollama
image: ollama/ollama:0.4.4
ports:
- containerPort: 11434
name: http
env:
- name: OLLAMA_HOST
value: "0.0.0.0"
resources:
requests:
cpu: "2"
memory: 4Gi
limits:
cpu: "8"
memory: 16Gi
volumeMounts:
- name: models
mountPath: /root/.ollama
volumeClaimTemplates:
- metadata:
name: models
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 50Gi

View File

@ -0,0 +1,55 @@
# Tune to match the Temporal Helm chart version in app-temporal.yaml.
# Postgres runs in platform-data (CloudNativePG). Inject credentials via ExternalSecret in production.
postgresql:
enabled: false
cassandra:
enabled: false
schema:
createDatabase:
enabled: true
setup:
enabled: true
update:
enabled: true
server:
replicaCount: 1
config:
persistence:
defaultStore: default
visibilityStore: default
numHistoryShards: 4
datastores:
default:
sql:
plugin: postgres12
databaseName: temporal
connectAddr: agentic-os-pg-rw.platform-data.svc.cluster.local:5432
connectProtocol: tcp
user: agentic_os
password: ""
maxConns: 20
maxIdleConns: 20
maxConnLifetime: "1h"
tls:
enabled: false
visibility:
sql:
plugin: postgres12
databaseName: temporal_visibility
connectAddr: agentic-os-pg-rw.platform-data.svc.cluster.local:5432
connectProtocol: tcp
user: agentic_os
password: ""
maxConns: 20
maxIdleConns: 20
maxConnLifetime: "1h"
tls:
enabled: false
metrics:
enabled: true
elasticsearch:
enabled: false
prometheus:
enabled: false
grafana:
enabled: false

View File

@ -0,0 +1,7 @@
export default function RootLayout({ children }) {
return (
<html lang="en">
<body style={{ fontFamily: "system-ui", margin: 0, padding: 24 }}>{children}</body>
</html>
);
}

View File

@ -0,0 +1,8 @@
export default function Page() {
return (
<main>
<h1>Agentic OS HQ</h1>
<p>Next.js shell scaffold. Wire this UI to Temporal and Langfuse in later iterations.</p>
</main>
);
}

View File

@ -0,0 +1,6 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
  // Double-invoke effects and flag unsafe patterns in development builds.
  reactStrictMode: true,
};
export default nextConfig;

View File

@ -0,0 +1,14 @@
{
"name": "agentic-os-hq-dashboard",
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start"
},
"dependencies": {
"next": "14.2.18",
"react": "18.3.1",
"react-dom": "18.3.1"
}
}

View File

@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- placeholder.yaml

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: interface-gitops-placeholder
namespace: interface
data:
note: "Replace with Slack bot and HQ dashboard workloads when ready."

View File

@ -0,0 +1,8 @@
{
"name": "agentic-os-slack-bot",
"private": true,
"type": "module",
"dependencies": {
"@slack/bolt": "^4.1.0"
}
}

View File

@ -0,0 +1,21 @@
import { App } from "@slack/bolt";

// Socket Mode Bolt app: SLACK_APP_TOKEN opens the websocket connection,
// while SLACK_BOT_TOKEN / SLACK_SIGNING_SECRET authenticate Web API calls.
const app = new App({
  token: process.env.SLACK_BOT_TOKEN,
  signingSecret: process.env.SLACK_SIGNING_SECRET,
  socketMode: true,
  appToken: process.env.SLACK_APP_TOKEN,
});

// Liveness check: reply to any message containing "ping".
app.message("ping", async ({ say }) => {
  await say("Agentic OS slack-bot scaffold is online.");
});

async function main() {
  // NOTE(review): in Socket Mode the HTTP port is presumably unused for
  // event delivery — confirm before exposing it in k8s manifests.
  await app.start(process.env.PORT ? Number(process.env.PORT) : 3000);
}

// Crash loudly so the container restarts instead of hanging half-initialized.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});

View File

@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- langfuse/secrets-stub.yaml
- langfuse/deployment.yaml
- langfuse/service.yaml
- monitoring

View File

@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: langfuse-web
namespace: observability
labels:
app.kubernetes.io/name: langfuse
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: langfuse
template:
metadata:
labels:
app.kubernetes.io/name: langfuse
spec:
containers:
- name: langfuse
image: langfuse/langfuse:2.93.4
ports:
- containerPort: 3000
name: http
envFrom:
- secretRef:
name: langfuse-server
resources:
requests:
cpu: 250m
memory: 512Mi
limits:
cpu: "2"
memory: 2Gi

View File

@ -0,0 +1,17 @@
# Optional: swap stub secrets for ExternalSecrets targeting Vault/Infisical.
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: langfuse-server
namespace: observability
spec:
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: infisical
target:
name: langfuse-server
creationPolicy: Owner
dataFrom:
- extract:
key: /agentic-os/langfuse-server

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Secret
metadata:
name: langfuse-server
namespace: observability
type: Opaque
stringData:
DATABASE_URL: postgresql://agentic_os:change-me@agentic-os-pg-rw.platform-data.svc.cluster.local:5432/langfuse
NEXTAUTH_URL: http://langfuse.observability.svc.cluster.local:3000
NEXTAUTH_SECRET: change-me-nextauth-secret-at-least-32-chars!!
SALT: change-me-salt-at-least-32-chars!!!!!!
ENCRYPTION_KEY: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: langfuse
namespace: observability
labels:
app.kubernetes.io/name: langfuse
spec:
selector:
app.kubernetes.io/name: langfuse
ports:
- name: http
port: 3000
targetPort: 3000

View File

@ -0,0 +1,3 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: []

View File

@ -0,0 +1,25 @@
Agentic OS Helm / manifest pins (update together when bumping).
Last reviewed: 2026-05-10
Git remote (Argo CD source):
http://192.168.8.248:3000/deepkoluguri/agentic-os.git
Optional Argo repo Secret (HTTP / insecure): platform/bootstrap/argocd-gitea-repo-secret.example.yaml
Argo CD bootstrap (one-time kustomize remote resource):
argoproj/argo-cd v3.4.1 manifests/install.yaml
File: platform/bootstrap/initial-argocd/kustomization.yaml
Argo CD Applications (Helm charts):
jetstack/cert-manager v1.20.2 app-cert-manager.yaml
external-secrets/external-secrets 0.14.4 app-external-secrets.yaml
cloudnative-pg/cloudnative-pg 0.24.0 app-cnpg-operator.yaml
temporal/temporal 0.55.0 app-temporal.yaml (1.x chart needs values rewrite before bump)
prometheus-community/kube-prometheus-stack 84.4.0 app-kube-prometheus.yaml
Verify locally (no cluster required):
pwsh -File scripts/verify-kustomize.ps1
Optional remote bootstrap check (fetches Argo install.yaml):
pwsh -File scripts/verify-kustomize.ps1 -IncludeRemoteArgoCD
Helm chart diff (requires helm + repos):
helm template ...

View File

@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: agents-runtime
namespace: argocd
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: agents/k8s
destination:
server: https://kubernetes.default.svc
namespace: ai-agents-gumbo
syncPolicy:
automated:
prune: false
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ai-core
namespace: argocd
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: ai-core
destination:
server: https://kubernetes.default.svc
namespace: ai-core
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
namespace: argocd
annotations:
argocd.argoproj.io/sync-wave: "-3"
spec:
project: agentic-os
source:
chart: cert-manager
repoURL: https://charts.jetstack.io
targetRevision: v1.20.2
helm:
values: |
installCRDs: true
destination:
server: https://kubernetes.default.svc
namespace: cert-manager
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,22 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cloudnative-pg-operator
namespace: argocd
annotations:
argocd.argoproj.io/sync-wave: "-2"
spec:
project: agentic-os
source:
chart: cloudnative-pg
repoURL: https://cloudnative-pg.github.io/charts
targetRevision: 0.24.0
destination:
server: https://kubernetes.default.svc
namespace: cnpg-system
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: external-secrets
namespace: argocd
annotations:
argocd.argoproj.io/sync-wave: "-2"
spec:
project: agentic-os
source:
chart: external-secrets
repoURL: https://charts.external-secrets.io
targetRevision: 0.14.4
helm:
values: |
installCRDs: true
destination:
server: https://kubernetes.default.svc
namespace: platform-security
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: interface
namespace: argocd
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: interface/k8s
destination:
server: https://kubernetes.default.svc
namespace: interface
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kube-prometheus-stack
namespace: argocd
spec:
project: agentic-os
source:
chart: kube-prometheus-stack
repoURL: https://prometheus-community.github.io/helm-charts
targetRevision: 84.4.0
helm:
values: |
prometheus:
prometheusSpec:
retention: 15d
grafana:
enabled: true
alertmanager:
enabled: true
destination:
server: https://kubernetes.default.svc
namespace: observability
syncPolicy:
automated:
prune: false
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: observability
namespace: argocd
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: observability
destination:
server: https://kubernetes.default.svc
namespace: observability
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: platform-data
namespace: argocd
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: platform/data
destination:
server: https://kubernetes.default.svc
namespace: platform-data
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,18 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: platform-networking
namespace: argocd
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: platform/networking
destination:
server: https://kubernetes.default.svc
namespace: kube-system
syncPolicy:
automated:
prune: true
selfHeal: true

View File

@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: platform-security
namespace: argocd
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: platform/security
destination:
server: https://kubernetes.default.svc
namespace: platform-security
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: temporal
namespace: argocd
annotations:
agentic-os.io/helm-chart-notes: "Pinned 0.55.x; Temporal chart 1.x uses a different values schema—bump targetRevision only after rewriting ai-core/temporal/helm-values.yaml against that chart."
spec:
project: agentic-os
destination:
server: https://kubernetes.default.svc
namespace: ai-core
sources:
- repoURL: https://go.temporal.io/helm-charts
chart: temporal
targetRevision: 0.55.0
helm:
valueFiles:
- $repo/helm-values.yaml
- repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
ref: repo
path: ai-core/temporal
syncPolicy:
automated:
prune: false
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: tools-mcp
namespace: argocd
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: tools-mcp
destination:
server: https://kubernetes.default.svc
namespace: tools-mcp
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-cert-manager.yaml
- app-external-secrets.yaml
- app-cnpg-operator.yaml
- app-platform-networking.yaml
- app-platform-security.yaml
- app-platform-data.yaml
- app-observability.yaml
- app-kube-prometheus.yaml
- app-ai-core.yaml
- app-temporal.yaml
- app-tools-mcp.yaml
- app-agents-runtime.yaml
- app-interface.yaml

View File

@ -0,0 +1,16 @@
# Optional: apply manually if Argo CD refuses plain HTTP or your repo is private.
# kubectl apply -f platform/bootstrap/argocd-gitea-repo-secret.example.yaml
# For private repos, set username + password (Gitea PAT or user password).
apiVersion: v1
kind: Secret
metadata:
name: agentic-os-gitea
namespace: argocd
labels:
argocd.argoproj.io/secret-type: repository
stringData:
type: git
url: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
insecure: "true"
username: ""
password: ""

View File

@ -0,0 +1,8 @@
# One-time bootstrap: Argo CD must exist before GitOps takes over.
# Apply from repo root (no placeholders to replace): kubectl apply -k platform/bootstrap/initial-argocd
# Pin the tag to a release you have audited (see platform/bootstrap/CHART_PINS.txt).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: argocd
resources:
- https://raw.githubusercontent.com/argoproj/argo-cd/v3.4.1/manifests/install.yaml

View File

@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespaces.yaml
- root-app-project.yaml
- root-application.yaml

View File

@ -0,0 +1,64 @@
apiVersion: v1
kind: Namespace
metadata:
name: argocd
labels:
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: v1
kind: Namespace
metadata:
name: platform-data
labels:
agentic-os.io/layer: "data"
---
apiVersion: v1
kind: Namespace
metadata:
name: platform-security
labels:
agentic-os.io/layer: "security"
---
apiVersion: v1
kind: Namespace
metadata:
name: ai-core
labels:
agentic-os.io/layer: "ai-core"
---
apiVersion: v1
kind: Namespace
metadata:
name: tools-mcp
labels:
agentic-os.io/layer: "mcp"
---
apiVersion: v1
kind: Namespace
metadata:
name: observability
labels:
agentic-os.io/layer: "observability"
---
apiVersion: v1
kind: Namespace
metadata:
name: ai-agents-gumbo
labels:
agentic-os.io/ai-agent-namespace: "true"
agentic-os.io/agent: "gumbo"
---
apiVersion: v1
kind: Namespace
metadata:
name: ai-agents-bernard
labels:
agentic-os.io/ai-agent-namespace: "true"
agentic-os.io/agent: "bernard"
---
apiVersion: v1
kind: Namespace
metadata:
name: interface
labels:
agentic-os.io/layer: "interface"

View File

@ -0,0 +1,28 @@
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: agentic-os
namespace: argocd
annotations:
argocd.argoproj.io/sync-wave: "0"
spec:
description: GitOps project for Agentic OS platform and workloads
sourceRepos:
- "*"
destinations:
- namespace: "*"
server: https://kubernetes.default.svc
clusterResourceWhitelist:
- group: ""
kind: Namespace
- group: "apiextensions.k8s.io"
kind: CustomResourceDefinition
- group: "rbac.authorization.k8s.io"
kind: ClusterRole
- group: "rbac.authorization.k8s.io"
kind: ClusterRoleBinding
- group: "cilium.io"
kind: CiliumClusterwideNetworkPolicy
namespaceResourceWhitelist:
- group: "*"
kind: "*"

View File

@ -0,0 +1,25 @@
# Root "app of apps". Point repoURL to your fork after clone.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: agentic-os-root
namespace: argocd
annotations:
argocd.argoproj.io/sync-wave: "5"
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: agentic-os
source:
repoURL: http://192.168.8.248:3000/deepkoluguri/agentic-os.git
targetRevision: main
path: platform/bootstrap/apps
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,14 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: agentic-os-pg
namespace: platform-data
spec:
instances: 1
primaryUpdateStrategy: unsupervised
storage:
size: 20Gi
bootstrap:
initdb:
database: agentic_os
owner: agentic_os

View File

@ -0,0 +1,43 @@
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
name: temporal
namespace: platform-data
spec:
name: temporal
owner: agentic_os
cluster:
name: agentic-os-pg
---
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
name: langfuse
namespace: platform-data
spec:
name: langfuse
owner: agentic_os
cluster:
name: agentic-os-pg
---
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
name: temporal-visibility
namespace: platform-data
spec:
name: temporal_visibility
owner: agentic_os
cluster:
name: agentic-os-pg
---
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
name: gumbo
namespace: platform-data
spec:
name: gumbo
owner: agentic_os
cluster:
name: agentic-os-pg

View File

@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cnpg/cluster.yaml
- cnpg/databases.yaml
- redis/redis.yaml
- minio/minio.yaml
- qdrant/qdrant.yaml

View File

@ -0,0 +1,86 @@
apiVersion: v1
kind: Secret
metadata:
name: minio-root
namespace: platform-data
type: Opaque
stringData:
rootUser: agentic-os-minio
rootPassword: change-me-minio-root
---
apiVersion: v1
kind: Service
metadata:
name: minio
namespace: platform-data
labels:
app.kubernetes.io/name: minio
spec:
ports:
- port: 9000
targetPort: 9000
name: api
- port: 9001
targetPort: 9001
name: console
selector:
app.kubernetes.io/name: minio
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: minio
namespace: platform-data
spec:
serviceName: minio
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: minio
template:
metadata:
labels:
app.kubernetes.io/name: minio
spec:
containers:
- name: minio
image: quay.io/minio/minio:RELEASE.2024-11-07T00-52-20Z
args:
- server
- /data
- --console-address
- ":9001"
env:
- name: MINIO_ROOT_USER
valueFrom:
secretKeyRef:
name: minio-root
key: rootUser
- name: MINIO_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: minio-root
key: rootPassword
ports:
- containerPort: 9000
name: api
- containerPort: 9001
name: console
volumeMounts:
- name: data
mountPath: /data
resources:
requests:
cpu: 250m
memory: 512Mi
limits:
cpu: "2"
memory: 2Gi
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 50Gi

View File

@ -0,0 +1,60 @@
apiVersion: v1
kind: Service
metadata:
name: qdrant
namespace: platform-data
labels:
app.kubernetes.io/name: qdrant
spec:
ports:
- port: 6333
targetPort: 6333
name: http
- port: 6334
targetPort: 6334
name: grpc
selector:
app.kubernetes.io/name: qdrant
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: qdrant
namespace: platform-data
spec:
serviceName: qdrant
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: qdrant
template:
metadata:
labels:
app.kubernetes.io/name: qdrant
spec:
containers:
- name: qdrant
image: qdrant/qdrant:v1.12.4
ports:
- containerPort: 6333
name: http
- containerPort: 6334
name: grpc
volumeMounts:
- name: data
mountPath: /qdrant/storage
resources:
requests:
cpu: 250m
memory: 512Mi
limits:
cpu: "2"
memory: 2Gi
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi

View File

@ -0,0 +1,60 @@
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: platform-data
labels:
app.kubernetes.io/name: redis
spec:
ports:
- port: 6379
targetPort: 6379
name: redis
selector:
app.kubernetes.io/name: redis
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: redis
namespace: platform-data
spec:
serviceName: redis
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis
template:
metadata:
labels:
app.kubernetes.io/name: redis
spec:
containers:
- name: redis
image: redis:7.4-alpine
ports:
- containerPort: 6379
name: redis
args:
- "--save"
- ""
- "--appendonly"
- "no"
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: "1"
memory: 512Mi
volumeMounts:
- name: data
mountPath: /data
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 5Gi

View File

@ -0,0 +1,55 @@
# Least-privilege egress for agent Job namespaces.
# - DNS (kube-dns)
# - LiteLLM (all LLM traffic is proxied here)
# - MCP tool plane
# - Postgres (LangGraph AsyncPostgresSaver checkpoints + structured agent state)
#
# Object storage (MinIO) remains reachable only from MCP servers, not from agent pods.
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: agentic-os-ai-agents-egress
spec:
endpointSelector:
matchExpressions:
- key: k8s:io.kubernetes.pod.namespace
operator: In
values:
- ai-agents-gumbo
- ai-agents-bernard
egress:
- toEndpoints:
- matchLabels:
k8s:io.kubernetes.pod.namespace: kube-system
k8s-app: kube-dns
toPorts:
- ports:
- port: "53"
protocol: UDP
- port: "53"
protocol: TCP
- toEndpoints:
- matchLabels:
k8s:io.kubernetes.pod.namespace: ai-core
app.kubernetes.io/name: litellm
toPorts:
- ports:
- port: "4000"
protocol: TCP
- toEndpoints:
- matchLabels:
k8s:io.kubernetes.pod.namespace: tools-mcp
toPorts:
- ports:
- port: "8080"
protocol: TCP
- port: "3000"
protocol: TCP
- toEndpoints:
- matchLabels:
k8s:io.kubernetes.pod.namespace: platform-data
cnpg.io/cluster: agentic-os-pg
toPorts:
- ports:
- port: "5432"
protocol: TCP

View File

@ -0,0 +1,21 @@
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
name: litellm-ingress-from-agents
namespace: ai-core
spec:
endpointSelector:
matchLabels:
app.kubernetes.io/name: litellm
ingress:
- fromEndpoints:
- matchExpressions:
- key: k8s:io.kubernetes.pod.namespace
operator: In
values:
- ai-agents-gumbo
- ai-agents-bernard
toPorts:
- ports:
- port: "4000"
protocol: TCP

View File

@ -0,0 +1,3 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: []

View File

@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- gateway-api
- traefik
- cilium/policies/clusterwide-agents-egress.yaml
- cilium/policies/cnp-litellm-ingress-from-agents.yaml

View File

@ -0,0 +1,3 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: []

View File

@ -0,0 +1,7 @@
# Replace with ACME/Let's Encrypt or private CA issuer matching your cluster.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigned-bootstrap
spec:
selfSigned: {}

View File

@ -0,0 +1,19 @@
# Wire Infisical as the backing store for ExternalSecrets. Replace host/tokenRef before sync.
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: infisical
spec:
provider:
infisical:
host: https://app.infisical.com
auth:
universalAuthCredentials:
clientId:
key: clientId
namespace: platform-security
name: infisical-universal-auth
clientSecret:
key: clientSecret
namespace: platform-security
name: infisical-universal-auth

View File

@ -0,0 +1,18 @@
# Alternative: HashiCorp Vault KV v2. Enable one ClusterSecretStore in your environment.
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: vault-kv2
spec:
provider:
vault:
server: "https://vault.platform-security.svc:8200"
path: "secret"
version: "v2"
auth:
kubernetes:
mountPath: "kubernetes"
role: "external-secrets"
serviceAccountRef:
name: external-secrets
namespace: platform-security

View File

@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- external-secrets/cluster-secret-store-infisical.yaml
- external-secrets/cluster-secret-store-vault.yaml
- cert-manager/cluster-issuer-stub.yaml

View File

@ -0,0 +1,55 @@
# Validate that every kustomize overlay in this repo renders cleanly with
# `kubectl kustomize`. Fails fast (-ErrorActionPreference Stop) on the first
# broken overlay. Pass -IncludeRemoteArgoCD to also build the bootstrap
# overlay that fetches the pinned upstream Argo CD install manifest.
param(
    [switch] $IncludeRemoteArgoCD
)
$ErrorActionPreference = "Stop"
# Script lives one level below the repo root; all target paths are relative to it.
$RepoRoot = Resolve-Path (Join-Path $PSScriptRoot "..")
# Render a single kustomize directory and throw on failure or empty output.
function Test-KustomizeDir {
    param(
        [Parameter(Mandatory = $true)][string] $RelativePath,
        [Parameter(Mandatory = $true)][string] $Label
    )
    $path = Join-Path $RepoRoot $RelativePath
    if (-not (Test-Path $path)) {
        throw "Missing kustomize path: $path"
    }
    Write-Host "== $Label ($RelativePath) ==" -ForegroundColor Cyan
    # Merge stderr into the captured output so the error text is shown on failure.
    $out = & kubectl kustomize $path 2>&1
    if ($LASTEXITCODE -ne 0) {
        Write-Host $out
        throw "kubectl kustomize failed for $RelativePath (exit $LASTEXITCODE)"
    }
    if (-not $out) {
        throw "kubectl kustomize produced empty output for $RelativePath"
    }
    # Rough document count: number of `---` separators plus one.
    $docCount = ($out | Select-String -Pattern "^---$" -AllMatches).Matches.Count + 1
    Write-Host ("OK ({0} YAML documents emitted)" -f $docCount) -ForegroundColor Green
}
# Every overlay that must always build locally (no network access needed).
$targets = @(
    @{ Rel = "platform\bootstrap"; Label = "platform-bootstrap" },
    @{ Rel = "platform\bootstrap\apps"; Label = "argocd-app-of-apps" },
    @{ Rel = "platform\networking"; Label = "platform-networking" },
    @{ Rel = "platform\security"; Label = "platform-security" },
    @{ Rel = "platform\data"; Label = "platform-data" },
    @{ Rel = "observability"; Label = "observability" },
    @{ Rel = "ai-core"; Label = "ai-core" },
    @{ Rel = "tools-mcp"; Label = "tools-mcp" },
    @{ Rel = "agents\k8s"; Label = "agents-k8s" },
    @{ Rel = "interface\k8s"; Label = "interface-k8s" }
)
foreach ($t in $targets) {
    Test-KustomizeDir -RelativePath $t.Rel -Label $t.Label
}
# The initial-argocd overlay pulls a remote manifest, so it is opt-in.
if ($IncludeRemoteArgoCD) {
    Test-KustomizeDir -RelativePath "platform\bootstrap\initial-argocd" -Label "initial-argocd (remote)"
}
Write-Host "All requested kustomize builds succeeded." -ForegroundColor Green

View File

@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- mcp-filesystem/minio-secret.yaml
- mcp-filesystem/deployment.yaml
- mcp-filesystem/service.yaml
- mcp-github/secret-stub.yaml
- mcp-github/deployment.yaml
- mcp-github/service.yaml

View File

@ -0,0 +1,7 @@
FROM python:3.11-slim
WORKDIR /app
RUN pip install --no-cache-dir "mcp[cli]>=1.2.0" boto3
COPY app/server.py /app/server.py
ENV HOST=0.0.0.0
ENV PORT=8080
CMD ["python", "/app/server.py"]

View File

@ -0,0 +1,47 @@
"""Read-only MinIO (S3) bridge exposed as MCP tools over SSE."""
from __future__ import annotations
import os
import boto3
from mcp.server.fastmcp import FastMCP
# Bucket exposed by this MCP server; everything served from it is read-only.
BUCKET = os.environ.get("MCP_FS_BUCKET", "agent-workspaces")
# In-cluster MinIO (S3-compatible) endpoint; override to target another store.
ENDPOINT = os.environ.get("AWS_ENDPOINT_URL", "http://minio.platform-data.svc.cluster.local:9000")
REGION = os.environ.get("AWS_REGION", "us-east-1")
# Bind address/port for the SSE transport.
HOST = os.environ.get("HOST", "0.0.0.0")
PORT = int(os.environ.get("PORT", "8080"))
# Credentials are mandatory: a missing AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
# raises KeyError at import time rather than failing at first request.
_session = boto3.session.Session()
_client = _session.client(
    "s3",
    endpoint_url=ENDPOINT,
    region_name=REGION,
    aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
    aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
)
# FastMCP server instance; tools below register themselves via @mcp.tool().
mcp = FastMCP("agentic-os-mcp-filesystem", host=HOST, port=PORT)
@mcp.tool()
def get_object_text(key: str) -> str:
    """Return the UTF-8 text body for `key` in the configured read-only bucket.

    Bytes that are not valid UTF-8 are replaced with U+FFFD instead of raising.
    """
    response = _client.get_object(Bucket=BUCKET, Key=key)
    payload: bytes = response["Body"].read()
    return payload.decode("utf-8", errors="replace")
@mcp.tool()
def list_objects(prefix: str = "") -> list[str]:
    """List object keys under optional prefix (read-only)."""
    pages = _client.get_paginator("list_objects_v2").paginate(Bucket=BUCKET, Prefix=prefix)
    # "Contents" is absent (or None) on empty pages; treat that as no keys.
    return [entry["Key"] for page in pages for entry in page.get("Contents") or []]
if __name__ == "__main__":
    # Serve the registered MCP tools over SSE on HOST:PORT (blocks forever).
    mcp.run(transport="sse")

View File

@ -0,0 +1,57 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mcp-filesystem
namespace: tools-mcp
labels:
app.kubernetes.io/name: mcp-filesystem
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mcp-filesystem
template:
metadata:
labels:
app.kubernetes.io/name: mcp-filesystem
spec:
containers:
- name: mcp
image: agentic-os/mcp-filesystem:latest
imagePullPolicy: IfNotPresent
env:
- name: MCP_FS_BUCKET
value: agent-workspaces
- name: AWS_ENDPOINT_URL
value: http://minio.platform-data.svc.cluster.local:9000
- name: AWS_REGION
value: us-east-1
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: mcp-filesystem-minio
key: access_key
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: mcp-filesystem-minio
key: secret_key
- name: HOST
value: "0.0.0.0"
- name: PORT
value: "8080"
ports:
- containerPort: 8080
name: mcp
readinessProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 3
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: "1"
memory: 512Mi

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: mcp-filesystem-minio
namespace: tools-mcp
type: Opaque
stringData:
access_key: agentic-os-minio
secret_key: change-me-minio-root

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: mcp-filesystem
namespace: tools-mcp
labels:
app.kubernetes.io/name: mcp-filesystem
spec:
selector:
app.kubernetes.io/name: mcp-filesystem
ports:
- name: mcp
port: 8080
targetPort: 8080

View File

@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mcp-github
namespace: tools-mcp
labels:
app.kubernetes.io/name: mcp-github
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mcp-github
template:
metadata:
labels:
app.kubernetes.io/name: mcp-github
spec:
containers:
- name: mcp
image: python:3.11-slim
imagePullPolicy: IfNotPresent
command: ["sleep", "infinity"]
ports:
- containerPort: 3000
name: mcp
resources:
requests:
cpu: 100m
memory: 256Mi

View File

@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: mcp-github-token
namespace: tools-mcp
type: Opaque
stringData:
token: ""

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: mcp-github
namespace: tools-mcp
labels:
app.kubernetes.io/name: mcp-github
spec:
selector:
app.kubernetes.io/name: mcp-github
ports:
- name: mcp
port: 3000
targetPort: 3000