From 6f3ba974e61ff8afd599ff201c1bddd4450ef1ce Mon Sep 17 00:00:00 2001
From: Chris Sexton <3216719+chrissexton@users.noreply.github.com>
Date: Sat, 11 May 2024 14:38:43 -0400
Subject: [PATCH] llm: add health check before request

---
 plugins/llm/llama.go | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/plugins/llm/llama.go b/plugins/llm/llama.go
index 9e016b3..c8958cb 100644
--- a/plugins/llm/llama.go
+++ b/plugins/llm/llama.go
@@ -37,6 +37,9 @@ func (g *LLMPlugin) llama() (chatEntry, error) {
 	}
 
 	for _, u := range llamaURL {
+		if err := g.healthCheck(u); err != nil {
+			continue
+		}
 		llamaResp, err := mkRequest(u, req)
 		if err != nil {
 			continue
@@ -48,6 +51,25 @@ func (g *LLMPlugin) llama() (chatEntry, error) {
 	return chatEntry{}, InstanceNotFoundError
 }
 
+// healthCheck performs a short GET against llamaURL to verify the
+// instance is reachable before a full completion request is attempted.
+func (g *LLMPlugin) healthCheck(llamaURL string) error {
+	timeout := g.c.GetInt("gpt.timeout", 1000)
+	req, err := http.NewRequest(http.MethodGet, llamaURL, nil)
+	if err != nil {
+		return err
+	}
+	client := http.Client{
+		Timeout: time.Duration(timeout) * time.Millisecond,
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return err
+	}
+	// Close the body so the transport can reuse the connection.
+	return resp.Body.Close()
+}
+
 func mkRequest(llamaURL string, req llamaRequest) (llamaResponse, error) {
 	body, err := json.Marshal(req)
 	if err != nil {