mirror of https://github.com/velour/catbase.git
llm: add health check before request
This commit is contained in:
parent f5fc3b542e
commit 6f3ba974e6
@@ -37,6 +37,9 @@ func (g *LLMPlugin) llama() (chatEntry, error) {
 	}
 
 	for _, u := range llamaURL {
+		if err := g.healthCheck(u); err != nil {
+			continue
+		}
 		llamaResp, err := mkRequest(u, req)
 		if err != nil {
 			continue
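The hunk above is the core of the change: llama() now walks the configured llama endpoints, skips any instance that fails the new health check, and falls through to the next endpoint when a request errors out. As a self-contained sketch of that failover pattern (generic names such as tryEach and ErrInstanceNotFound are illustrative stand-ins for the plugin's own mkRequest, chatEntry, and InstanceNotFoundError, not its actual API):

package main

import (
	"errors"
	"fmt"
)

// ErrInstanceNotFound stands in for the plugin's InstanceNotFoundError.
var ErrInstanceNotFound = errors.New("no healthy llama instance found")

// tryEach probes each candidate URL and returns the first successful result,
// mirroring the loop added to llama() above.
func tryEach(urls []string, healthCheck func(string) error, request func(string) (string, error)) (string, error) {
	for _, u := range urls {
		if err := healthCheck(u); err != nil {
			continue // instance is down, try the next one
		}
		resp, err := request(u)
		if err != nil {
			continue // request failed, try the next one
		}
		return resp, nil
	}
	return "", ErrInstanceNotFound
}

func main() {
	urls := []string{"http://localhost:8080", "http://localhost:8081"}
	resp, err := tryEach(urls,
		func(u string) error { return nil },                           // pretend every instance is healthy
		func(u string) (string, error) { return "ok from " + u, nil }, // pretend the request succeeds
	)
	fmt.Println(resp, err)
}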
@@ -48,6 +51,19 @@ func (g *LLMPlugin) llama() (chatEntry, error) {
 	return chatEntry{}, InstanceNotFoundError
 }
 
+func (p *LLMPlugin) healthCheck(llamaURL string) error {
+	timeout := p.c.GetInt("gpt.timeout", 1000)
+	req, _ := http.NewRequest("get", llamaURL, nil)
+	client := http.Client{
+		Timeout: time.Duration(timeout) * time.Millisecond,
+	}
+	_, err := client.Do(req)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 func mkRequest(llamaURL string, req llamaRequest) (llamaResponse, error) {
 	body, err := json.Marshal(req)
 	if err != nil {
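One note on the healthCheck added above: it passes the literal method string "get" (http.MethodGet is the idiomatic constant) and discards the *http.Response without closing its body, which prevents the underlying connection from being reused. A hedged sketch of a variant that addresses both points and also treats non-2xx responses as unhealthy follows; the free function, the timeoutMs parameter, and the example URL are illustrative assumptions, not the plugin's code.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// healthCheck is an illustrative variant of the method added in the commit:
// it uses http.MethodGet instead of the literal "get", closes the response
// body so the connection can be reused, and rejects non-2xx responses.
// timeoutMs stands in for the gpt.timeout config value read in the commit.
func healthCheck(llamaURL string, timeoutMs int) error {
	req, err := http.NewRequest(http.MethodGet, llamaURL, nil)
	if err != nil {
		return err
	}
	client := http.Client{Timeout: time.Duration(timeoutMs) * time.Millisecond}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("health check for %s returned status %d", llamaURL, resp.StatusCode)
	}
	return nil
}

func main() {
	// Example call against an assumed local llama endpoint; 1000ms mirrors the
	// gpt.timeout default used in the commit.
	fmt.Println(healthCheck("http://localhost:8080", 1000))
}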