changing to use OpenRouter

@@ -1,14 +1,9 @@
 GIT_REPOSITORY=https://git.url/user/repo.git
-OPENAI_API_KEY=openai_api_key
-GEMINI_API_KEY=gemini_api_key
 DISCORD_WEBHOOK_URL=discord_webhook_channel_url
 
-# LLM provider per agent function ("openai" or "gemini", defaults to "openai")
-SUMMARY_CREATOR_PROVIDER=gemini
-SUMMARY_FORMATTER_PROVIDER=openai
-
-# LLM models
-GEMINI_MODEL=gemini-3-flash-preview
-OPENAI_MODEL=gpt-5-mini
+# OpenAI-compatible provider (e.g. OpenRouter)
+OPENAI_API_URL=https://openrouter.ai/api/v1
+OPENAI_TOKEN=your_token_here
+OPENAI_MODEL=openai/gpt-5.4-mini
 
 TOP_N_FILES=10
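
The three new variables are everything an OpenAI-compatible endpoint needs: a base URL, a bearer token, and a model id. A minimal sketch of how they compose into a request, using the placeholder values from the .env above (not real credentials) and mirroring the URL and header construction in the internal/llm changes further down:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Placeholder values copied from the .env example above.
	apiURL := "https://openrouter.ai/api/v1"
	token := "your_token_here"
	model := "openai/gpt-5.4-mini"

	// Any OpenAI-compatible base URL works; the endpoint path is always /chat/completions.
	url := fmt.Sprintf("%s/chat/completions", strings.TrimRight(apiURL, "/"))
	fmt.Println(url)                              // https://openrouter.ai/api/v1/chat/completions
	fmt.Println("Authorization: Bearer " + token) // bearer auth header
	fmt.Println("model: " + model)                // sent in the JSON request body
}
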
@@ -22,16 +22,15 @@ spec:
                 secretKeyRef:
                   name: mindforge-secrets
                   key: GIT_REPOSITORY
-            - name: GEMINI_API_KEY
+            - name: OPENAI_TOKEN
               valueFrom:
                 secretKeyRef:
                   name: mindforge-secrets
-                  key: GEMINI_API_KEY
-            - name: OPENAI_API_KEY
-              valueFrom:
-                secretKeyRef:
-                  name: mindforge-secrets
-                  key: OPENAI_API_KEY
+                  key: OPENAI_TOKEN
+            - name: OPENAI_API_URL
+              value: https://openrouter.ai/api/v1
+            - name: OPENAI_MODEL
+              value: openai/gpt-5.4-mini
             - name: DISCORD_WEBHOOK_URL
               valueFrom:
                 secretKeyRef:
@@ -42,14 +41,6 @@ spec:
                 secretKeyRef:
                   name: mindforge-secrets
                   key: HAVEN_NOTIFY_URL
-            - name: SUMMARY_CREATOR_PROVIDER
-              value: gemini
-            - name: SUMMARY_FORMATTER_PROVIDER
-              value: openai
-            - name: GEMINI_MODEL
-              value: gemini-3-flash-preview
-            - name: OPENAI_MODEL
-              value: gpt-5-mini
             - name: TOP_N_FILES
               value: "10"
             - name: LAST_N_DAYS
@@ -2,50 +2,11 @@ package agent
 
 import (
 	"fmt"
-	"os"
 	"path/filepath"
-	"strings"
 
 	"mindforge.cronjob/internal/llm"
 )
 
-// Provider represents the LLM provider to use.
-type Provider string
-
-const (
-	ProviderOpenAI Provider = "openai"
-	ProviderGemini Provider = "gemini"
-)
-
-// providerFromEnv reads the provider for a given agent from an env var,
-// defaulting to OpenAI if not set or unrecognised.
-func providerFromEnv(envKey string) Provider {
-	val := strings.ToLower(strings.TrimSpace(os.Getenv(envKey)))
-	if val == string(ProviderGemini) {
-		return ProviderGemini
-	}
-	return ProviderOpenAI
-}
-
-// send routes the request to the given LLM provider.
-func send(provider Provider, systemPrompt, userPrompt string) (string, error) {
-	llmService := llm.NewLLMService()
-	switch provider {
-	case ProviderGemini:
-		geminiModel := os.Getenv("GEMINI_MODEL")
-		if geminiModel == "" {
-			geminiModel = "gemini-3.1-flash-lite-preview"
-		}
-		return llmService.SendGeminiRequest(systemPrompt, userPrompt, geminiModel)
-	default:
-		openaiModel := os.Getenv("OPENAI_MODEL")
-		if openaiModel == "" {
-			openaiModel = "gpt-5-mini"
-		}
-		return llmService.SendOpenAIRequest(systemPrompt, userPrompt, openaiModel)
-	}
-}
-
 // SummaryCreatorAgent creates a summary of the git diff for a specific file.
 func SummaryCreatorAgent(filePath, gitDiff string) (string, error) {
 	fileName := filepath.Base(filePath)
@@ -66,7 +27,7 @@ Responda sempre em Português do Brasil (pt-BR).`
 
 	userPrompt := fmt.Sprintf("Caminho do arquivo: %s\nPasta (Assunto Principal): %s\nArquivo (Assunto Específico): %s\n\nGit Diff:\n%s", filePath, folderName, fileName, gitDiff)
 
-	return send(providerFromEnv("SUMMARY_CREATOR_PROVIDER"), systemPrompt, userPrompt)
+	return llm.NewLLMService().Send(systemPrompt, userPrompt)
 }
 
 // SummaryFormatterAgent formats a plain text summary into Markdown.
@@ -82,5 +43,5 @@ Regras de formatação:
 
 Responda sempre em Português do Brasil (pt-BR).`
 
-	return send(providerFromEnv("SUMMARY_FORMATTER_PROVIDER"), systemPrompt, summary)
+	return llm.NewLLMService().Send(systemPrompt, summary)
 }
@@ -1,86 +0,0 @@
-package llm
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"strings"
-	"time"
-)
-
-func (s *llmService) SendGeminiRequest(systemPrompt string, userPrompt string, model string) (string, error) {
-	apiKey := getEnvConfig("GEMINI_API_KEY")
-	if apiKey == "" {
-		return "", errors.New("GEMINI_API_KEY not found in .env or environment")
-	}
-
-	apiBase := "https://generativelanguage.googleapis.com/v1beta"
-
-	url := fmt.Sprintf("%s/models/%s:generateContent?key=%s", strings.TrimRight(apiBase, "/"), model, apiKey)
-
-	reqBody := map[string]interface{}{}
-	if systemPrompt != "" {
-		reqBody["system_instruction"] = map[string]interface{}{
-			"parts": []map[string]string{
-				{"text": systemPrompt},
-			},
-		}
-	}
-	reqBody["contents"] = []map[string]interface{}{
-		{
-			"role": "user",
-			"parts": []map[string]string{
-				{"text": userPrompt},
-			},
-		},
-	}
-
-	jsonBody, err := json.Marshal(reqBody)
-	if err != nil {
-		return "", err
-	}
-
-	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))
-	if err != nil {
-		return "", err
-	}
-	req.Header.Set("Content-Type", "application/json")
-
-	client := &http.Client{Timeout: 120 * time.Second}
-	resp, err := client.Do(req)
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-
-	bodyBytes, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return "", err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		return "", fmt.Errorf("Gemini API error status %d: %s", resp.StatusCode, string(bodyBytes))
-	}
-
-	var result struct {
-		Candidates []struct {
-			Content struct {
-				Parts []struct {
-					Text string `json:"text"`
-				} `json:"parts"`
-			} `json:"content"`
-		} `json:"candidates"`
-	}
-	if err := json.Unmarshal(bodyBytes, &result); err != nil {
-		return "", err
-	}
-
-	if len(result.Candidates) > 0 && len(result.Candidates[0].Content.Parts) > 0 {
-		return result.Candidates[0].Content.Parts[0].Text, nil
-	}
-
-	return "", errors.New("empty response from Gemini API")
-}
@@ -6,8 +6,7 @@ import (
 
 // Service defines the interface for connecting to LLMs
 type Service interface {
-	SendOpenAIRequest(systemPrompt string, userPrompt string, model string) (string, error)
-	SendGeminiRequest(systemPrompt string, userPrompt string, model string) (string, error)
+	Send(systemPrompt string, userPrompt string) (string, error)
 }
 
 type llmService struct{}
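
Narrowing Service to a single Send method removes provider and model choices from every call site, and it also makes the service easy to substitute in tests. A hypothetical sketch (fakeLLM is not part of this commit):

package llm

// fakeLLM is a hypothetical test double for the narrowed Service interface.
type fakeLLM struct{ reply string }

func (f fakeLLM) Send(systemPrompt string, userPrompt string) (string, error) {
	return f.reply, nil
}

// Compile-time check that fakeLLM satisfies Service.
var _ Service = fakeLLM{}
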
@@ -11,15 +11,23 @@ import (
 	"time"
 )
 
-func (s *llmService) SendOpenAIRequest(systemPrompt string, userPrompt string, model string) (string, error) {
-	apiKey := getEnvConfig("OPENAI_API_KEY")
-	if apiKey == "" {
-		return "", errors.New("OPENAI_API_KEY not found in .env or environment")
+func (s *llmService) Send(systemPrompt string, userPrompt string) (string, error) {
+	apiURL := getEnvConfig("OPENAI_API_URL")
+	if apiURL == "" {
+		return "", errors.New("OPENAI_API_URL not found in environment")
 	}
 
-	apiBase := "https://api.openai.com/v1"
+	token := getEnvConfig("OPENAI_TOKEN")
+	if token == "" {
+		return "", errors.New("OPENAI_TOKEN not found in environment")
+	}
 
-	url := fmt.Sprintf("%s/chat/completions", strings.TrimRight(apiBase, "/"))
+	model := getEnvConfig("OPENAI_MODEL")
+	if model == "" {
+		return "", errors.New("OPENAI_MODEL not found in environment")
+	}
+
+	url := fmt.Sprintf("%s/chat/completions", strings.TrimRight(apiURL, "/"))
 
 	reqBody := map[string]interface{}{
 		"model": model,
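
getEnvConfig itself is untouched by this commit; the old error text ("not found in .env or environment") suggests it consults a .env file before falling back to the process environment. A minimal hypothetical stand-in using only the environment:

package llm

import "os"

// getEnvConfig is sketched here for context only; the real helper (unchanged
// by this commit) appears to also read a .env file before the environment.
func getEnvConfig(key string) string {
	return os.Getenv(key)
}
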
@@ -42,7 +50,7 @@ func (s *llmService) SendOpenAIRequest(systemPrompt string, userPrompt string, m
 		return "", err
 	}
 	req.Header.Set("Content-Type", "application/json")
-	req.Header.Set("Authorization", "Bearer "+apiKey)
+	req.Header.Set("Authorization", "Bearer "+token)
 
 	client := &http.Client{Timeout: 120 * time.Second}
 	resp, err := client.Do(req)
@@ -62,7 +70,7 @@ func (s *llmService) SendOpenAIRequest(systemPrompt string, userPrompt string, m
 		}
 
 		if resp.StatusCode != http.StatusOK {
-			lastErr = fmt.Errorf("OpenAI API error status %d: %s", resp.StatusCode, string(bodyBytes))
+			lastErr = fmt.Errorf("API error status %d: %s", resp.StatusCode, string(bodyBytes))
 			time.Sleep(time.Second * time.Duration(1<<i))
 			continue
 		}
@@ -81,8 +89,8 @@ func (s *llmService) SendOpenAIRequest(systemPrompt string, userPrompt string, m
 		if len(result.Choices) > 0 {
 			return result.Choices[0].Message.Content, nil
 		}
-		return "", errors.New("empty response from OpenAI API")
+		return "", errors.New("empty response from API")
 	}
 
-	return "", fmt.Errorf("failed to get OpenAI response after 5 attempts. Last error: %v", lastErr)
+	return "", fmt.Errorf("failed to get response after 5 attempts. Last error: %v", lastErr)
 }
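
The retry fragments above (lastErr, the 1<<i sleep, the final error after 5 attempts) belong to a pre-existing loop that this commit only re-words. A self-contained sketch of that shape, with the HTTP round-trip replaced by a hypothetical attempt function:

package main

import (
	"errors"
	"fmt"
	"time"
)

// attempt stands in for the real HTTP call; hypothetical for this sketch.
func attempt(i int) (string, error) {
	if i < 2 {
		return "", errors.New("transient error")
	}
	return "ok", nil
}

func main() {
	var lastErr error
	for i := 0; i < 5; i++ {
		out, err := attempt(i)
		if err != nil {
			lastErr = err
			// Exponential backoff: 1s, 2s, 4s, ... between attempts.
			time.Sleep(time.Second * time.Duration(1<<i))
			continue
		}
		fmt.Println(out)
		return
	}
	fmt.Printf("failed to get response after 5 attempts. Last error: %v\n", lastErr)
}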