llm: add health check before request

Chris Sexton 2024-05-11 14:38:43 -04:00
parent f5fc3b542e
commit 6f3ba974e6
1 changed file with 16 additions and 0 deletions


@@ -37,6 +37,9 @@ func (g *LLMPlugin) llama() (chatEntry, error) {
	}
	for _, u := range llamaURL {
		if err := g.healthCheck(u); err != nil {
			continue
		}
		llamaResp, err := mkRequest(u, req)
		if err != nil {
			continue
@@ -48,6 +51,19 @@ func (g *LLMPlugin) llama() (chatEntry, error) {
	return chatEntry{}, InstanceNotFoundError
}

func (p *LLMPlugin) healthCheck(llamaURL string) error {
	timeout := p.c.GetInt("gpt.timeout", 1000)
	req, _ := http.NewRequest(http.MethodGet, llamaURL, nil)
	client := http.Client{
		Timeout: time.Duration(timeout) * time.Millisecond,
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}
func mkRequest(llamaURL string, req llamaRequest) (llamaResponse, error) {
	body, err := json.Marshal(req)
	if err != nil {
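
For context, the pattern this commit introduces, probe each configured endpoint with a short timeout and only send the real chat request to one that answers, can be sketched as a small standalone program. The probe, firstHealthy, and httptest pieces below are illustrative stand-ins, not part of the plugin; the real code reads its endpoints and the gpt.timeout value from the bot's config.

// Standalone sketch of the probe-then-request failover added in this commit.
// Names (probe, firstHealthy) and the httptest server are illustrative only.
package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// probe sends a GET with a short timeout and reports whether the endpoint answered.
func probe(url string, timeout time.Duration) error {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	client := http.Client{Timeout: timeout}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

// firstHealthy returns the first URL that passes the probe, mirroring the
// loop in llama() that skips endpoints failing healthCheck.
func firstHealthy(urls []string, timeout time.Duration) (string, error) {
	for _, u := range urls {
		if err := probe(u, timeout); err != nil {
			continue
		}
		return u, nil
	}
	return "", errors.New("no healthy instance found")
}

func main() {
	// One dead endpoint and one live httptest server stand in for llamaURL.
	live := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer live.Close()

	urls := []string{"http://127.0.0.1:1", live.URL}
	u, err := firstHealthy(urls, 1000*time.Millisecond)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("would send the chat request to:", u)
}

Note that a plain GET against the base URL only confirms the process is accepting connections; if the backend is a llama.cpp server, probing its /health route (or checking the response status code) would be a stricter readiness check.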