diff --git a/config.dev.json b/config.dev.json index b78e4db..6234f34 100644 --- a/config.dev.json +++ b/config.dev.json @@ -1,4 +1,5 @@ { "api_key": "your api key", + "proxy": "your proxy", "auto_pass": true } diff --git a/config/config.go b/config/config.go index 6afede1..8c5e594 100644 --- a/config/config.go +++ b/config/config.go @@ -13,6 +13,8 @@ type Configuration struct { ApiKey string `json:"api_key"` // 自动通过好友 AutoPass bool `json:"auto_pass"` + //代理地址 + Proxy string `json:"proxy"` } var config *Configuration @@ -39,12 +41,16 @@ func LoadConfig() *Configuration { // 如果环境变量有配置,读取环境变量 ApiKey := os.Getenv("ApiKey") AutoPass := os.Getenv("AutoPass") + Proxy := os.Getenv("Proxy") if ApiKey != "" { config.ApiKey = ApiKey } if AutoPass == "true" { config.AutoPass = true } + if Proxy != "" { + config.Proxy = Proxy + } }) return config } diff --git a/gtp/gtp.go b/gtp/gtp.go index 15cd8e2..496baa6 100644 --- a/gtp/gtp.go +++ b/gtp/gtp.go @@ -3,47 +3,62 @@ package gtp import ( "bytes" "encoding/json" - "github.com/869413421/wechatbot/config" "io/ioutil" "log" "net/http" + "net/url" + + "github.com/869413421/wechatbot/config" ) const BASEURL = "https://api.openai.com/v1/" // ChatGPTResponseBody 响应体 type ChatGPTResponseBody struct { - ID string `json:"id"` - Object string `json:"object"` - Created int `json:"created"` - Model string `json:"model"` - Choices []map[string]interface{} `json:"choices"` - Usage map[string]interface{} `json:"usage"` + ID string `json:"id"` + Object string `json:"object"` + Created int `json:"created"` + Model string `json:"model"` + Choices []ChoiceItem `json:"choices"` + Usage map[string]interface{} `json:"usage"` } type ChoiceItem struct { + Index int `json:"index"` + FinishReason string `json:"finish_reason"` + Message ChatGPTMessage `json:"message"` } // ChatGPTRequestBody 请求体 type ChatGPTRequestBody struct { - Model string `json:"model"` - Prompt string `json:"prompt"` - MaxTokens int `json:"max_tokens"` - Temperature float32 
`json:"temperature"` - TopP int `json:"top_p"` - FrequencyPenalty int `json:"frequency_penalty"` - PresencePenalty int `json:"presence_penalty"` + Model string `json:"model"` + Messages []ChatGPTMessage `json:"messages"` + MaxTokens int `json:"max_tokens"` + Temperature float32 `json:"temperature"` + TopP int `json:"top_p"` + FrequencyPenalty int `json:"frequency_penalty"` + PresencePenalty int `json:"presence_penalty"` + Stop []string `json:"stop"` + User string `json:"user"` +} + +type ChatGPTMessage struct { + Role string `json:"role"` + Content string `json:"content"` } // Completions gtp文本模型回复 -//curl https://api.openai.com/v1/completions -//-H "Content-Type: application/json" -//-H "Authorization: Bearer your chatGPT key" -//-d '{"model": "text-davinci-003", "prompt": "give me good song", "temperature": 0, "max_tokens": 7}' +// curl https://api.openai.com/v1/chat/completions +// -H "Content-Type: application/json" +// -H "Authorization: Bearer your chatGPT key" +// -d '{"model": "gpt-3.5-turbo", "messages": [{"role":"system", "content":"You are a assistant"}, {"role":"user", "content": "give me good song"}], "temperature": 0, "max_tokens": 7}' func Completions(msg string) (string, error) { requestBody := ChatGPTRequestBody{ - Model: "text-davinci-003", - Prompt: msg, + Model: "gpt-3.5-turbo", + Messages: []ChatGPTMessage{ + {Role: "system", Content: "You are a helpful assistant."}, + {Role: "user", Content: msg}, + }, MaxTokens: 2048, Temperature: 0.7, TopP: 1, @@ -56,15 +71,27 @@ func Completions(msg string) (string, error) { return "", err } log.Printf("request gtp json string : %v", string(requestData)) - req, err := http.NewRequest("POST", BASEURL+"completions", bytes.NewBuffer(requestData)) + req, err := http.NewRequest("POST", BASEURL+"chat/completions", bytes.NewBuffer(requestData)) if err != nil { return "", err } apiKey := config.LoadConfig().ApiKey + proxy := config.LoadConfig().Proxy req.Header.Set("Content-Type", "application/json") 
req.Header.Set("Authorization", "Bearer "+apiKey) - client := &http.Client{} + var client *http.Client + if len(proxy) == 0 { + client = &http.Client{} + } else { + proxyAddr, _ := url.Parse(proxy) + client = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyURL(proxyAddr), + }, + } + } + response, err := client.Do(req) if err != nil { return "", err @@ -85,7 +112,7 @@ func Completions(msg string) (string, error) { var reply string if len(gptResponseBody.Choices) > 0 { for _, v := range gptResponseBody.Choices { - reply = v["text"].(string) + reply = v.Message.Content break } }