package openai

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"

	"wechatbot/config"

	log "github.com/sirupsen/logrus"
)

// ChatMessage is a single message in a chat completion conversation.
type ChatMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// ChatGPTRequestBody is the request body sent to the chat completions endpoint.
type ChatGPTRequestBody struct {
	Model    string        `json:"model"`
	Messages []ChatMessage `json:"messages"`
}

// ResponseChoice is one completion choice returned by the API.
type ResponseChoice struct {
	Index        int         `json:"index"`
	Message      ChatMessage `json:"message"`
	FinishReason string      `json:"finish_reason"`
}

// ResponseUsage reports token usage for a completion request.
type ResponseUsage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

// ChatGPTResponseBody is the response body returned by the chat completions endpoint.
type ChatGPTResponseBody struct {
	ID      string           `json:"id"`
	Object  string           `json:"object"`
	Created int              `json:"created"`
	Choices []ResponseChoice `json:"choices"`
	Usage   ResponseUsage    `json:"usage"`
}

// ChatGPTErrorBody is the error payload returned when the API request fails.
type ChatGPTErrorBody struct {
	Error map[string]interface{} `json:"error"`
}
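
// A failed request is expected to return an error object rather than choices.
// Illustrative shape (based on OpenAI's documented error format; the exact
// field set can vary by error type):
//
//	{"error": {"message": "...", "type": "invalid_request_error", "param": null, "code": null}}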

/*
curl https://api.openai.com/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer YOUR_API_KEY' \
  -d '{
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "Hello!"}]
  }'

Request body:
{
  "model": "gpt-3.5-turbo",
  "messages": [{"role": "user", "content": "Hello!"}]
}

Response body:
{
  "id": "chatcmpl-123",
  "object": "chat.completion",
  "created": 1677652288,
  "choices": [{
    "index": 0,
    "message": {
      "role": "assistant",
      "content": "\n\nHello there, how may I assist you today?"
    },
    "finish_reason": "stop"
  }],
  "usage": {
    "prompt_tokens": 9,
    "completion_tokens": 12,
    "total_tokens": 21
  }
}
*/

// Completions sends msg to the OpenAI chat completions endpoint and returns
// the assistant's reply.
func Completions(msg string) (*string, error) {
	apiKey := config.GetOpenAiApiKey()
	if apiKey == nil {
		return nil, errors.New("apiKey is not configured")
	}

	var messages []ChatMessage
	messages = append(messages, ChatMessage{
		Role:    "user",
		Content: msg,
	})

	requestBody := ChatGPTRequestBody{
		Model:    "gpt-3.5-turbo",
		Messages: messages,
	}
	requestData, err := json.Marshal(requestBody)
	if err != nil {
		log.Error(err)
		return nil, err
	}

	log.Debugf("request openai json string : %v", string(requestData))
	req, err := http.NewRequest("POST", "https://api.openai.com/v1/chat/completions", bytes.NewBuffer(requestData))
	if err != nil {
		log.Error(err)
		return nil, err
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *apiKey))
	client := &http.Client{}
	response, err := client.Do(req)
	if err != nil {
		return nil, err
	}

	defer func(Body io.ReadCloser) {
		if err := Body.Close(); err != nil {
			log.Error(err)
		}
	}(response.Body)

	body, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}

	gptResponseBody := &ChatGPTResponseBody{}
	log.Debug(string(body))
	err = json.Unmarshal(body, gptResponseBody)
	if err != nil {
		log.Error(err)
		return nil, err
	}

	// Concatenate the content of every returned choice into one reply.
	var reply string
	for _, v := range gptResponseBody.Choices {
		reply += "\n"
		reply += v.Message.Content
	}

	// An empty reply usually means the API returned an error payload instead
	// of choices, so surface its message to the caller.
	if len(reply) == 0 {
		gptErrorBody := &ChatGPTErrorBody{}
		err = json.Unmarshal(body, gptErrorBody)
		if err != nil {
			log.Error(err)
			return nil, err
		}

		reply += "Error: "
		// Guard the type assertion so an unexpected payload cannot panic.
		if message, ok := gptErrorBody.Error["message"].(string); ok {
			reply += message
		}
	}

	return &reply, nil
}
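
// Usage sketch (hypothetical, not part of the original file): a caller such as
// the bot's message handler could forward incoming text to Completions and
// relay the reply. Only the exported Completions function is assumed here;
// sendToUser is a placeholder for the bot's own send logic.
//
//	reply, err := openai.Completions("Hello!")
//	if err != nil {
//		log.Error(err)
//		return
//	}
//	sendToUser(*reply)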