Преглед изворни кода

第一次本地可运行版提交

李伟 пре 2 недеља
родитељ
комит
973539c652

+ 2 - 3
desc/all.api

@@ -14,6 +14,7 @@ import "./wechat/message_records.api"
 import "./wechat/chatroom_member.api"
 import "./wechat/user.api"
 import "./openapi/contact.api"
+import "./openapi/chat.api"
 import "./wechat/batch_msg.api"
 import "./wechat/msg.api"
 import "./wechat/agent.api"
@@ -41,6 +42,4 @@ import "./wechat/credit_balance.api"
 import "./wechat/credit_usage.api"
 import "./wechat/pay_recharge.api"
 import "./wechat/whatsapp.api"
-import "./wechat/whatsapp_channel.api"
-import "./wechat/fastgpt.api"
-import "./wechat/department.api"
+import "./wechat/whatsapp_channel.api"

+ 188 - 0
desc/openapi/chat.api

@@ -0,0 +1,188 @@
+import "../base.api"
+
+type (
+    
+    //以下是API请求类型
+
+	CompApiReq {
+        CompCtlReq
+		StdCompApiReq
+        FastGptSpecReq
+    }
+
+	//FastGpt Completions请求信息
+	FastGptApiReq {
+		StdCompApiReq
+        FastGptSpecReq
+	}
+
+	//标准Completions请求信息
+	StdCompApiReq {
+        //model,like 'gpt-4o'
+        Model string `json:"model,optional"`
+        //Message list
+        Messages []StdCompMessage `json:"messages"`
+        //Stream 是否流式输出
+        Stream bool `json:"stream,default=false"`
+    }
+
+	//关于工作流配置的请求信息
+	CompCtlReq {
+		//EventType事件类型
+        EventType string `json:"event_type,default=fastgpt"`
+        //WorkId工作流ID
+        WorkId string `json:"work_id"`
+		//IsBatch 是同步还是异步,默认及取值false表明同步
+        IsBatch bool `json:"is_batch,default=false"`
+        //异步回调地址
+        Callback string `json:"callback,optional"`
+	}
+
+	FastGptSpecReq {
+        //ChatId
+        ChatId string `json:"chat_id,optional"`
+        //ResponseChatItemId
+        ResponseChatItemId string `json:"response_chat_item_id,optional"`
+        //Detail 详情开关
+        Detail bool `json:"detail,default=false"`
+        //Variables
+        Variables map[string]string `json:"variables,optional"`
+	}
+	
+	StdCompMessage {
+        Role string `json:"role"`
+        Content string `json:"content"`
+    }
+
+    //以下是API响应类型
+	CompOpenApiResp {
+        StdCompApiResp
+		FastgptSpecResp
+    }
+
+	StdCompApiResp {
+        // A unique identifier for the chat completion.
+	    ID string `json:"id"`
+	    // A list of chat completion choices. Can be more than one if `n` is greater
+	    // than 1.
+    	Choices []ChatCompletionChoice `json:"choices"`
+	    // The Unix timestamp (in seconds) of when the chat completion was created.
+	    Created int64 `json:"created"`
+	    // The model used for the chat completion.
+	    Model string `json:"model"`
+	    // The object type, which is always `chat.completion`.
+	    Object string `json:"object"`
+	    // The service tier used for processing the request.
+	    ServiceTier string `json:"service_tier,omitempty"`
+	    // This fingerprint represents the backend configuration that the model runs with.
+	    //
+    	// Can be used in conjunction with the `seed` request parameter to understand when
+	    // backend changes have been made that might impact determinism.
+	    SystemFingerprint string `json:"system_fingerprint"`
+	    // Usage statistics for the completion request.
+	    Usage CompletionUsage    `json:"usage,omitempty"`
+    }
+
+	FastgptSpecResp {
+		ResponseData []map[string]string `json:"responseData,omitempty"`
+		NewVariables map[string]string `json:"newVariables,omitempty"`
+	}
+	
+	ChatCompletionAudio {
+	    // Unique identifier for this audio response.
+	    ID string `json:"id"`
+        //TODO
+    }
+
+    ChatCompletionMessage {
+	    // The contents of the message.
+	    Content string `json:"content"`
+        //The contents of the reasoning message
+        ReasoningContent string `json:"reasoning_content,omitempty"`
+	    // The refusal message generated by the model.
+	    Refusal string `json:"refusal"`
+	    // The role of the author of this message.
+	    Role string `json:"role"`
+	    // If the audio output modality is requested, this object contains data about the
+	    // audio response from the model.
+	    // [Learn more](https://platform.openai.com/docs/guides/audio).
+	    Audio ChatCompletionAudio `json:"audio,omitempty"`
+	}
+
+    ChatCompletionChoice {
+	    // The reason the model stopped generating tokens. This will be `stop` if the model
+	    // hit a natural stop point or a provided stop sequence, `length` if the maximum
+	    // number of tokens specified in the request was reached, `content_filter` if
+	    // content was omitted due to a flag from our content filters, `tool_calls` if the
+	    // model called a tool, or `function_call` (deprecated) if the model called a
+	    // function.
+	    FinishReason string `json:"finish_reason"`
+	    // The index of the choice in the list of choices.
+	    Index int64 `json:"index"`
+	    // A chat completion message generated by the model.
+	    Message ChatCompletionMessage  `json:"message,omitempty"`
+        // A chat completion message generated by the model stream mode.
+	    Delta ChatCompletionMessage  `json:"delta,omitempty"`
+    }
+
+    CompletionUsageCompletionTokensDetails {
+	    // When using Predicted Outputs, the number of tokens in the prediction that
+	    // appeared in the completion.
+	    AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
+	    // Audio input tokens generated by the model.
+	    AudioTokens int64 `json:"audio_tokens"`
+	    // Tokens generated by the model for reasoning.
+	    ReasoningTokens int64 `json:"reasoning_tokens"`
+	    // When using Predicted Outputs, the number of tokens in the prediction that did
+	    // not appear in the completion. However, like reasoning tokens, these tokens are
+	    // still counted in the total completion tokens for purposes of billing, output,
+	    // and context window limits.
+	    RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
+    }
+
+    CompletionUsagePromptTokensDetails {
+	    // Audio input tokens present in the prompt.
+	    AudioTokens int64 `json:"audio_tokens"`
+	    // Cached tokens present in the prompt.
+	    CachedTokens int64 `json:"cached_tokens"`
+    }
+
+    CompletionUsage {
+	    // Number of tokens in the generated completion.
+	    CompletionTokens int64 `json:"completion_tokens,required"`
+	    // Number of tokens in the prompt.
+	    PromptTokens int64 `json:"prompt_tokens,required"`
+	    // Total number of tokens used in the request (prompt + completion).
+	    TotalTokens int64 `json:"total_tokens,required"`
+	    // Breakdown of tokens used in a completion.
+	    CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
+	    // Breakdown of tokens used in the prompt.
+	    PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
+    }   
+
+)
+
+@server(
+    group: chat
+    prefix: /v1
+)
+
+service Wechat {
+    
+    @handler getAuth
+    get /chat/getauth () returns (BaseMsgResp)
+}
+
+@server(
+	
+    group: chat
+    prefix: /v1
+	jwt: Auth
+	middleware: OpenAuthority
+)
+
+service Wechat {
+    
+    @handler chatCompletions
+    post /chat/completions (CompApiReq) returns (CompOpenApiResp)
+}

+ 15 - 12
etc/wechat.yaml

@@ -6,8 +6,8 @@ Timeout: 30000
 Mode: "dev"
 
 Auth:
-  AccessSecret: jS6VKDtsJf3z1n2VKDtsJf3z1n2
-  AccessExpire: 2592000
+  AccessSecret: LnQD46hBde0AgFXBer8ZZZe3FgC
+  AccessExpire: 259200
 
 CROSConf:
   Address: '*'
@@ -28,8 +28,8 @@ DatabaseConf:
   Host: mysql-server
   Port: 3306
   DBName: wechat
-  Username: wallet
-  Password: wallet
+  Username: root
+  Password: simple-admin.
   MaxOpenConn: 100
   SSLMode: disable
   CacheTime: 5
@@ -47,9 +47,9 @@ CasbinDatabaseConf:
   Type: mysql
   Host: mysql-server
   Port: 3306
-  DBName: wechat-admin
-  Username: wallet
-  Password: wallet
+  DBName: wechat_admin
+  Username: root
+  Password: simple-admin.
   MaxOpenConn: 100
   SSLMode: disable
   CacheTime: 5
@@ -90,9 +90,12 @@ OpenAI:
   BaseUrl: https://api.openai.com/v1
   ApiKey: sk-ZQRNypQOC8ID5WbpCdF263C58dF44271842e86D408Bb3848
 
-WebSocket:
-    -
-      Type: wechat
-      Name: default
-      Url: ws://chat.gkscrm.com:13088
+FastgptMongoConf:
+  Url: mongodb://myusername:mypassword@47.251.25.21:27017/?connect=direct
+  DBName: fastgpt
 
+WebSocket:
+  -
+    Type: wecom
+    Name: default
+    Url: ws://wecom.gkscrm.com:15088  

+ 2 - 1
go.mod

@@ -11,10 +11,11 @@ require (
 	github.com/alibabacloud-go/sts-20150401/v2 v2.0.2
 	github.com/alibabacloud-go/tea v1.2.2
 	github.com/alibabacloud-go/tea-utils/v2 v2.0.7
-	github.com/bwmarrin/snowflake v0.3.0
 	github.com/casbin/casbin/v2 v2.85.0
+	github.com/deckarep/golang-set/v2 v2.8.0
 	github.com/go-resty/resty/v2 v2.14.0
 	github.com/gofrs/uuid/v5 v5.0.0
+	github.com/golang-jwt/jwt/v4 v4.3.0
 	github.com/golang-jwt/jwt/v5 v5.2.1
 	github.com/gorilla/websocket v1.5.0
 	github.com/imroc/req/v3 v3.43.1

+ 4 - 2
go.sum

@@ -118,8 +118,6 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
 github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
 github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
 github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
-github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0=
-github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE=
 github.com/casbin/casbin/v2 v2.29.2/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
 github.com/casbin/casbin/v2 v2.85.0 h1:VajW9GR/T0fp3SND183gneZGIAdYtl9C7bDYBrqQiGg=
 github.com/casbin/casbin/v2 v2.85.0/go.mod h1:jX8uoN4veP85O/n2674r2qtfSXI6myvxW85f6TH50fw=
@@ -163,6 +161,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ=
+github.com/deckarep/golang-set/v2 v2.8.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
@@ -242,6 +242,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
+github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=

+ 44 - 0
internal/handler/chat/chat_completions_handler.go

@@ -0,0 +1,44 @@
+package chat
+
+import (
+	"net/http"
+
+	"github.com/zeromicro/go-zero/rest/httpx"
+
+	"wechat-api/internal/logic/chat"
+	"wechat-api/internal/svc"
+	"wechat-api/internal/types"
+)
+
+// swagger:route post /v1/chat/completions chat ChatCompletions
+//
+
+//
+
+//
+// Parameters:
+//  + name: body
+//    required: true
+//    in: body
+//    type: CompApiReq
+//
+// Responses:
+//  200: CompOpenApiResp
+
+func ChatCompletionsHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		var req types.CompApiReq
+		if err := httpx.Parse(r, &req, true); err != nil {
+			httpx.ErrorCtx(r.Context(), w, err)
+			return
+		}
+
+		l := chat.NewChatCompletionsLogic(r.Context(), svcCtx)
+		resp, err := l.ChatCompletions(&req)
+		if err != nil {
+			httpx.ErrorCtx(r.Context(), w, err)
+		} else {
+			httpx.OkJsonCtx(r.Context(), w, resp)
+		}
+	}
+}

+ 31 - 0
internal/handler/chat/get_auth_handler.go

@@ -0,0 +1,31 @@
+package chat
+
+import (
+	"net/http"
+
+	"github.com/zeromicro/go-zero/rest/httpx"
+
+	"wechat-api/internal/logic/chat"
+	"wechat-api/internal/svc"
+)
+
+// swagger:route get /v1/chat/getauth chat GetAuth
+//
+
+//
+
+//
+// Responses:
+//  200: BaseMsgResp
+
+func GetAuthHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		l := chat.NewGetAuthLogic(r.Context(), svcCtx)
+		resp, err := l.GetAuth()
+		if err != nil {
+			httpx.ErrorCtx(r.Context(), w, err)
+		} else {
+			httpx.OkJsonCtx(r.Context(), w, resp)
+		}
+	}
+}

+ 27 - 51
internal/handler/routes.go

@@ -24,16 +24,15 @@ import (
 	base "wechat-api/internal/handler/base"
 	batch_msg "wechat-api/internal/handler/batch_msg"
 	category "wechat-api/internal/handler/category"
+	chat "wechat-api/internal/handler/chat"
 	chatrecords "wechat-api/internal/handler/chatrecords"
 	chatsession "wechat-api/internal/handler/chatsession"
 	contact "wechat-api/internal/handler/contact"
 	credit_balance "wechat-api/internal/handler/credit_balance"
 	credit_usage "wechat-api/internal/handler/credit_usage"
 	dashboard "wechat-api/internal/handler/dashboard"
-	department "wechat-api/internal/handler/department"
 	employee "wechat-api/internal/handler/employee"
 	employee_config "wechat-api/internal/handler/employee_config"
-	fastgpt "wechat-api/internal/handler/fastgpt"
 	label "wechat-api/internal/handler/label"
 	label_relationship "wechat-api/internal/handler/label_relationship"
 	label_tagging "wechat-api/internal/handler/label_tagging"
@@ -854,6 +853,32 @@ func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
 	)
 
 	server.AddRoutes(
+		[]rest.Route{
+			{
+				Method:  http.MethodGet,
+				Path:    "/chat/getauth",
+				Handler: chat.GetAuthHandler(serverCtx),
+			},
+		},
+		rest.WithPrefix("/v1"),
+	)
+
+	server.AddRoutes(
+		rest.WithMiddlewares(
+			[]rest.Middleware{serverCtx.OpenAuthority},
+			[]rest.Route{
+				{
+					Method:  http.MethodPost,
+					Path:    "/chat/completions",
+					Handler: chat.ChatCompletionsHandler(serverCtx),
+				},
+			}...,
+		),
+		rest.WithJwt(serverCtx.Config.Auth.AccessSecret),
+		rest.WithPrefix("/v1"),
+	)
+
+	server.AddRoutes(
 		rest.WithMiddlewares(
 			[]rest.Middleware{serverCtx.Authority},
 			[]rest.Route{
@@ -2041,53 +2066,4 @@ func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
 		),
 		rest.WithJwt(serverCtx.Config.Auth.AccessSecret),
 	)
-
-	server.AddRoutes(
-		[]rest.Route{
-			{
-				Method:  http.MethodGet,
-				Path:    "/api/fastgpt/set_token",
-				Handler: fastgpt.SetTokenHandler(serverCtx),
-			},
-			{
-				Method:  http.MethodPost,
-				Path:    "/api/fastgpt/create",
-				Handler: fastgpt.CreateFastgptHandler(serverCtx),
-			},
-		},
-	)
-
-	server.AddRoutes(
-		rest.WithMiddlewares(
-			[]rest.Middleware{serverCtx.Authority},
-			[]rest.Route{
-				{
-					Method:  http.MethodPost,
-					Path:    "/department/create",
-					Handler: department.CreateDepartmentHandler(serverCtx),
-				},
-				{
-					Method:  http.MethodPost,
-					Path:    "/department/update",
-					Handler: department.UpdateDepartmentHandler(serverCtx),
-				},
-				{
-					Method:  http.MethodPost,
-					Path:    "/department/delete",
-					Handler: department.DeleteDepartmentHandler(serverCtx),
-				},
-				{
-					Method:  http.MethodPost,
-					Path:    "/department/list",
-					Handler: department.GetDepartmentListHandler(serverCtx),
-				},
-				{
-					Method:  http.MethodPost,
-					Path:    "/department",
-					Handler: department.GetDepartmentByIdHandler(serverCtx),
-				},
-			}...,
-		),
-		rest.WithJwt(serverCtx.Config.Auth.AccessSecret),
-	)
 }

+ 55 - 0
internal/logic/chat/chat_completions_logic.go

@@ -0,0 +1,55 @@
+package chat
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"wechat-api/internal/svc"
+	"wechat-api/internal/types"
+	"wechat-api/internal/utils/compapi"
+	"wechat-api/internal/utils/contextkey"
+
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+type ChatCompletionsLogic struct {
+	logx.Logger
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+}
+
+func NewChatCompletionsLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ChatCompletionsLogic {
+	return &ChatCompletionsLogic{
+		Logger: logx.WithContext(ctx),
+		ctx:    ctx,
+		svcCtx: svcCtx}
+}
+
+func (l *ChatCompletionsLogic) ChatCompletions(req *types.CompApiReq) (resp *types.CompOpenApiResp, err error) {
+	// todo: add your logic here and delete this line
+
+	/*
+	   1.鉴权获得token
+	   2.必要参数检测及转换
+	   3. 根据event_type选择不同处理路由
+	*/
+	var (
+		authToken string
+		ok        bool
+	)
+	authToken, ok = contextkey.OpenapiTokenKey.GetValue(l.ctx)
+	if !ok {
+		return nil, errors.New("content get token err")
+	}
+	fmt.Printf("In ChatCompletionsLogic.ChatCompletions AuthToken:%v\n", authToken)
+
+	return l.workForFastgpt(req)
+}
+
+func (l *ChatCompletionsLogic) workForFastgpt(req *types.CompApiReq) (resp *types.CompOpenApiResp, err error) {
+
+	apiKey := "fastgpt-d2uehCb2T40h9chNGjf4bpFrVKmMkCFPbrjfVLZ6DAL2zzqzOFJWP"
+	return compapi.NewFastgptChatCompletions(l.ctx, apiKey, req)
+
+}

+ 54 - 0
internal/logic/chat/get_auth_logic.go

@@ -0,0 +1,54 @@
+package chat
+
+import (
+	"context"
+	"time"
+
+	"wechat-api/internal/svc"
+	"wechat-api/internal/types"
+
+	myjwt "github.com/golang-jwt/jwt/v4"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+type GetAuthLogic struct {
+	logx.Logger
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+}
+
+func NewGetAuthLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetAuthLogic {
+	return &GetAuthLogic{
+		Logger: logx.WithContext(ctx),
+		ctx:    ctx,
+		svcCtx: svcCtx}
+}
+
+func (l *GetAuthLogic) GetAuth() (resp *types.BaseMsgResp, err error) {
+	// todo: add your logic here and delete this line
+
+	now := time.Now().Unix()
+	accessExpire := l.svcCtx.Config.Auth.AccessExpire
+	accessSecret := l.svcCtx.Config.Auth.AccessSecret
+	Id := now
+
+	accessToken, err := getToken(accessSecret, now, accessExpire, Id)
+	if err != nil {
+		accessToken = ""
+	}
+
+	return &types.BaseMsgResp{
+		Code: 100,
+		Msg:  accessToken,
+	}, err
+}
+
+func getToken(secretKey string, iat, seconds, uid int64) (string, error) {
+	claims := make(myjwt.MapClaims)
+	claims["exp"] = iat + seconds
+	claims["iat"] = iat
+	claims["uid"] = uid
+	token := myjwt.New(myjwt.SigningMethodHS256)
+	token.Claims = claims
+	return token.SignedString([]byte(secretKey))
+}

+ 71 - 0
internal/middleware/openauthority_middleware.go

@@ -0,0 +1,71 @@
+package middleware
+
+import (
+	"fmt"
+	"net/http"
+
+	"wechat-api/ent"
+	"wechat-api/internal/utils/contextkey"
+	jwtutils "wechat-api/internal/utils/jwt"
+
+	"wechat-api/internal/config"
+
+	baseconfig "github.com/suyuan32/simple-admin-common/config"
+
+	"github.com/redis/go-redis/v9"
+	"github.com/suyuan32/simple-admin-common/utils/jwt"
+	"github.com/zeromicro/go-zero/core/errorx"
+	"github.com/zeromicro/go-zero/core/logx"
+	"github.com/zeromicro/go-zero/rest/httpx"
+)
+
+/*
+//"wechat-api/internal/types/payload"
+var p types.payload.SendWxPayload
+*/
+
+type OpenAuthorityMiddleware struct {
+	DB     *ent.Client
+	Rds    redis.UniversalClient
+	Config config.Config
+}
+
+func NewOpenAuthorityMiddleware(db *ent.Client, rds redis.UniversalClient, c config.Config) *OpenAuthorityMiddleware {
+	return &OpenAuthorityMiddleware{
+		DB:     db,
+		Rds:    rds,
+		Config: c,
+	}
+}
+
+func (m *OpenAuthorityMiddleware) Handle(next http.HandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		// TODO generate middleware implement function, delete after code implementation
+		apiToken := jwt.StripBearerPrefixFromToken(r.Header.Get("Authorization"))
+		claims, err := jwtutils.ParseJwtToken(m.Config.Auth.AccessSecret, apiToken)
+		fmt.Println("claims")
+		fmt.Printf("%+v\n", claims)
+		if err != nil {
+			logx.Errorw("check user auth error", logx.Field("detail", err.Error()))
+			httpx.Error(w, errorx.NewApiError(http.StatusInternalServerError, err.Error()))
+			return
+		}
+		ctx := r.Context()
+		ctx = contextkey.HttpResponseWriterKey.WithValue(ctx, w) //context存入http.ResponseWriter
+		if len(apiToken) > 0 {
+			ctx = contextkey.OpenapiTokenKey.WithValue(ctx, apiToken)
+		}
+
+		userAgent := r.Header.Get("User-Agent")
+
+		fmt.Printf("In OpenAuthorityMiddlewar:%s,userAgent:%s\n", baseconfig.RedisTokenPrefix+":"+apiToken, userAgent)
+
+		//ctx = context.WithValue(ctx, "HttpResp-Writer", w)
+		//ctx = WithOutWriter(ctx, w)
+		//httpx.Error(w, errorx.NewApiError(http.StatusInternalServerError, "错误啦"))
+		newReq := r.WithContext(ctx)
+		// Passthrough to next handler if need
+		next(w, newReq)
+		//next(w, r)
+	}
+}

+ 22 - 78
internal/svc/service_context.go

@@ -1,41 +1,31 @@
 package svc
 
 import (
-	"github.com/bwmarrin/snowflake"
-	"github.com/casbin/casbin/v2"
-	"github.com/redis/go-redis/v9"
-	"github.com/zeromicro/go-zero/core/collection"
-	"gorm.io/gorm"
-	"time"
-	"wechat-api/database"
-	"wechat-api/database/dao/wechat/query"
 	"wechat-api/internal/config"
 	"wechat-api/internal/middleware"
-	"wechat-api/internal/pkg/wechat_ws"
+
+	"github.com/redis/go-redis/v9"
+
+	"wechat-api/ent"
+	_ "wechat-api/ent/runtime"
 
 	"github.com/suyuan32/simple-admin-core/rpc/coreclient"
 	"github.com/zeromicro/go-zero/core/logx"
+
+	"github.com/casbin/casbin/v2"
 	"github.com/zeromicro/go-zero/rest"
 	"github.com/zeromicro/go-zero/zrpc"
-	"wechat-api/ent"
-	_ "wechat-api/ent/runtime"
-	mongo_model "wechat-api/mongo_model"
 )
 
 type ServiceContext struct {
-	Config      config.Config
-	Casbin      *casbin.Enforcer
-	Authority   rest.Middleware
-	Miniprogram rest.Middleware
-	DB          *ent.Client //大家都不爱用,后边可以慢慢过渡到gorm的库
-	WechatDB    *gorm.DB
-	WechatQ     *query.Query
-	CoreRpc     coreclient.Core
-	Rds         redis.UniversalClient
-	WechatWs    map[string]*wechat_ws.WechatWsClient
-	Cache       *collection.Cache
-	NodeID      *snowflake.Node
-	MongoModel  *mongo_model.AllMongoModel
+	Config        config.Config
+	Casbin        *casbin.Enforcer
+	Authority     rest.Middleware
+	OpenAuthority rest.Middleware
+	Miniprogram   rest.Middleware
+	DB            *ent.Client
+	CoreRpc       coreclient.Core
+	Rds           redis.UniversalClient
 }
 
 func NewServiceContext(c config.Config) *ServiceContext {
@@ -50,61 +40,15 @@ func NewServiceContext(c config.Config) *ServiceContext {
 		ent.Debug(), // debug mode
 	)
 
-	// 初始化 MongoDB 客户端
-	all_mongo_model := mongo_model.SetupMongoModel(c.FastgptMongoConf.Url, c.FastgptMongoConf.DBName)
-
-	// gorm 数据库连接
-	wechatDb, err := database.InitWechatDB(c.DatabaseConf, c.Mode)
-	if err != nil {
-		logx.Error(err)
-		panic("gorm 数据库连接失败")
-	}
-	wechatQ := query.Use(wechatDb)
-
 	coreRpc := coreclient.NewCore(zrpc.NewClientIfEnable(c.CoreRpc))
 
-	// 初始化微信ws客户端
-	// todo 现在配置是从 config.yaml中读取的,后续需要改成从数据库中读取,以便匹配不同的微信号
-	wechatWs := make(map[string]*wechat_ws.WechatWsClient)
-
-	for _, ws := range c.WebSocket {
-		if ws.Type == "wechat" {
-			client, err := wechat_ws.NewWechatWsClient(ws.Url, ws.Name, ws.Type)
-			if err != nil {
-				logx.Error(err)
-			} else {
-				logx.Info("建立ws client成功~", ws.Name)
-				go client.ReadPump()
-				go client.WritePump()
-				wechatWs[ws.Name] = client
-			}
-		}
-	}
-
-	cache, err := collection.NewCache(time.Hour * 24 * 365)
-	if err != nil {
-		logx.Error(err)
-		panic("本地缓存实例失败")
-	}
-
-	node, err := snowflake.NewNode(1)
-	if err != nil {
-		logx.Error(err)
-		panic("雪花算法实例失败")
-	}
-
 	return &ServiceContext{
-		Config:      c,
-		Authority:   middleware.NewAuthorityMiddleware(cbn, rds, coreRpc).Handle,
-		Miniprogram: middleware.NewMiniprogramMiddleware(cbn, rds, coreRpc, c).Handle,
-		DB:          db,
-		WechatDB:    wechatDb,
-		WechatQ:     wechatQ,
-		CoreRpc:     coreRpc,
-		Rds:         rds,
-		WechatWs:    wechatWs,
-		Cache:       cache,
-		NodeID:      node,
-		MongoModel:  all_mongo_model,
+		Config:        c,
+		Authority:     middleware.NewAuthorityMiddleware(cbn, rds, coreRpc).Handle,
+		OpenAuthority: middleware.NewOpenAuthorityMiddleware(db, rds, c).Handle,
+		Miniprogram:   middleware.NewMiniprogramMiddleware(cbn, rds, coreRpc, c).Handle,
+		DB:            db,
+		CoreRpc:       coreRpc,
+		Rds:           rds,
 	}
 }

+ 164 - 79
internal/types/types.go

@@ -1895,6 +1895,170 @@ type WxidReq struct {
 	Wxid string `json:"wxid"`
 }
 
+// 以下是API请求类型
+// swagger:model CompApiReq
+type CompApiReq struct {
+	CompCtlReq
+	StdCompApiReq
+	FastGptSpecReq
+}
+
+// FastGpt Completions请求信息
+// swagger:model FastGptApiReq
+type FastGptApiReq struct {
+	StdCompApiReq
+	FastGptSpecReq
+}
+
+// 标准Completions请求信息
+// swagger:model StdCompApiReq
+type StdCompApiReq struct {
+	//model,like 'gpt-4o'
+	Model string `json:"model,optional"`
+	//Message list
+	Messages []StdCompMessage `json:"messages"`
+	//Stream 是否流式输出
+	Stream bool `json:"stream,default=false"`
+}
+
+// 关于工作流配置的请求信息
+// swagger:model CompCtlReq
+type CompCtlReq struct {
+	//EventType事件类型
+	EventType string `json:"event_type,default=fastgpt"`
+	//WorkId工作流ID
+	WorkId string `json:"work_id"`
+	//IsBatch 是同步还是异步,默认及取值false表明同步
+	IsBatch bool `json:"is_batch,default=false"`
+	//异步回调地址
+	Callback string `json:"callback,optional"`
+}
+
+// swagger:model FastGptSpecReq
+type FastGptSpecReq struct {
+	//ChatId
+	ChatId string `json:"chat_id,optional"`
+	//ResponseChatItemId
+	ResponseChatItemId string `json:"response_chat_item_id,optional"`
+	//Detail 详情开关
+	Detail bool `json:"detail,default=false"`
+	//Variables
+	Variables map[string]string `json:"variables,optional"`
+}
+
+type StdCompMessage struct {
+	Role    string `json:"role"`
+	Content string `json:"content"`
+}
+
+// 以下是API响应类型
+// swagger:model CompOpenApiResp
+type CompOpenApiResp struct {
+	StdCompApiResp
+	FastgptSpecResp
+}
+
+// swagger:model StdCompApiResp
+type StdCompApiResp struct {
+	// A unique identifier for the chat completion.
+	ID string `json:"id"`
+	// A list of chat completion choices. Can be more than one if `n` is greater
+	// than 1.
+	Choices []ChatCompletionChoice `json:"choices"`
+	// The Unix timestamp (in seconds) of when the chat completion was created.
+	Created int64 `json:"created"`
+	// The model used for the chat completion.
+	Model string `json:"model"`
+	// The object type, which is always `chat.completion`.
+	Object string `json:"object"`
+	// The service tier used for processing the request.
+	ServiceTier string `json:"service_tier,omitempty"`
+	// This fingerprint represents the backend configuration that the model runs with.
+	//
+	// Can be used in conjunction with the `seed` request parameter to understand when
+	// backend changes have been made that might impact determinism.
+	SystemFingerprint string `json:"system_fingerprint"`
+	// Usage statistics for the completion request.
+	Usage CompletionUsage `json:"usage,omitempty"`
+}
+
+// swagger:model FastgptSpecResp
+type FastgptSpecResp struct {
+	ResponseData []map[string]any `json:"responseData,omitempty"`
+	NewVariables map[string]any   `json:"newVariables,omitempty"`
+}
+
+type ChatCompletionAudio struct {
+	// Unique identifier for this audio response.
+	ID string `json:"id"`
+}
+
+type ChatCompletionMessage struct {
+	// The contents of the message.
+	Content string `json:"content"`
+	//The contents of the reasoning message
+	ReasoningContent string `json:"reasoning_content,omitempty"`
+	// The refusal message generated by the model.
+	Refusal string `json:"refusal"`
+	// The role of the author of this message.
+	Role string `json:"role"`
+	// If the audio output modality is requested, this object contains data about the
+	// audio response from the model.
+	// [Learn more](https://platform.openai.com/docs/guides/audio).
+	Audio ChatCompletionAudio `json:"audio,omitempty"`
+}
+
+type ChatCompletionChoice struct {
+	// The reason the model stopped generating tokens. This will be `stop` if the model
+	// hit a natural stop point or a provided stop sequence, `length` if the maximum
+	// number of tokens specified in the request was reached, `content_filter` if
+	// content was omitted due to a flag from our content filters, `tool_calls` if the
+	// model called a tool, or `function_call` (deprecated) if the model called a
+	// function.
+	FinishReason string `json:"finish_reason"`
+	// The index of the choice in the list of choices.
+	Index int64 `json:"index"`
+	// A chat completion message generated by the model.
+	Message ChatCompletionMessage `json:"message,omitempty"`
+	// A chat completion message generated by the model stream mode.
+	Delta ChatCompletionMessage `json:"delta,omitempty"`
+}
+
+type CompletionUsageCompletionTokensDetails struct {
+	// When using Predicted Outputs, the number of tokens in the prediction that
+	// appeared in the completion.
+	AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
+	// Audio input tokens generated by the model.
+	AudioTokens int64 `json:"audio_tokens"`
+	// Tokens generated by the model for reasoning.
+	ReasoningTokens int64 `json:"reasoning_tokens"`
+	// When using Predicted Outputs, the number of tokens in the prediction that did
+	// not appear in the completion. However, like reasoning tokens, these tokens are
+	// still counted in the total completion tokens for purposes of billing, output,
+	// and context window limits.
+	RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
+}
+
+type CompletionUsagePromptTokensDetails struct {
+	// Audio input tokens present in the prompt.
+	AudioTokens int64 `json:"audio_tokens"`
+	// Cached tokens present in the prompt.
+	CachedTokens int64 `json:"cached_tokens"`
+}
+
+type CompletionUsage struct {
+	// Number of tokens in the generated completion.
+	CompletionTokens int64 `json:"completion_tokens,required"`
+	// Number of tokens in the prompt.
+	PromptTokens int64 `json:"prompt_tokens,required"`
+	// Total number of tokens used in the request (prompt + completion).
+	TotalTokens int64 `json:"total_tokens,required"`
+	// Breakdown of tokens used in a completion.
+	CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
+	// Breakdown of tokens used in the prompt.
+	PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
+}
+
 // The data of batch msg information | BatchMsg信息
 // swagger:model BatchMsgInfo
 type BatchMsgInfo struct {
@@ -4017,82 +4181,3 @@ type WhatsappChannelInfoResp struct {
 	// WhatsappChannel information | WhatsappChannel数据
 	Data WhatsappChannelInfo `json:"data"`
 }
-
-// swagger:model CreateInfo
-type CreateInfo struct {
-	// Translated Name | 展示名称
-	UserName string `json:"username"`
-	// Name | 部门名称
-	Title *string `json:"title"`
-}
-
-// The response data of department information | 部门信息
-// swagger:model DepartmentInfo
-type DepartmentInfo struct {
-	BaseIDInfo
-	// Translated Name | 展示名称
-	Trans string `json:"trans,optional"`
-	// Status | 状态
-	// max : 20
-	Status *uint32 `json:"status,optional" validate:"omitempty,lt=20"`
-	// Sort | 排序
-	// max : 10000
-	Sort *uint32 `json:"sort,optional" validate:"omitempty,lt=10000"`
-	// Name | 部门名称
-	// min length : 1
-	// max length : 50
-	Name *string `json:"name,optional" validate:"omitempty,min=1,max=50"`
-	// Ancestors | 父级部门列表
-	// max length : 200
-	Ancestors *string `json:"ancestors,optional" validate:"omitempty,max=200"`
-	// Leader | 部门负责人
-	// max length : 20
-	Leader *string `json:"leader,optional" validate:"omitempty,max=20"`
-	// Phone | 电话号码
-	// max length : 18
-	Phone *string `json:"phone,optional" validate:"omitempty,max=18"`
-	// Email | 邮箱
-	// max length : 70
-	Email *string `json:"email,optional" validate:"omitempty,max=70"`
-	// Remark | 备注
-	// max length : 200
-	Remark *string `json:"remark,optional" validate:"omitempty,max=200"`
-	// ParentId | 父级 ID
-	ParentId *uint64 `json:"parentId,optional"`
-}
-
-// The response data of department list | 部门列表数据
-// swagger:model DepartmentListResp
-type DepartmentListResp struct {
-	BaseDataInfo
-	// Department list data | 部门列表数据
-	Data DepartmentListInfo `json:"data"`
-}
-
-// Department list data | 部门列表数据
-// swagger:model DepartmentListInfo
-type DepartmentListInfo struct {
-	BaseListInfo
-	// The API list data | 部门列表数据
-	Data []DepartmentInfo `json:"data"`
-}
-
-// Get department list request params | 部门列表请求参数
-// swagger:model DepartmentListReq
-type DepartmentListReq struct {
-	PageInfo
-	// Name | 部门名称
-	// max length : 50
-	Name *string `json:"name,optional" validate:"omitempty,max=50"`
-	// Leader | 部门负责人
-	// max length : 20
-	Leader *string `json:"leader,optional" validate:"omitempty,max=20"`
-}
-
-// Department information response | 部门信息返回体
-// swagger:model DepartmentInfoResp
-type DepartmentInfoResp struct {
-	BaseDataInfo
-	// Department information | 部门数据
-	Data DepartmentInfo `json:"data"`
-}

+ 121 - 0
internal/utils/compapi/compsteam.go

@@ -0,0 +1,121 @@
+package compapi
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"io"
+	"net/http"
+	"strings"
+	"wechat-api/internal/types"
+
+	"github.com/openai/openai-go/packages/ssestream"
+)
+
// ChatCompSteamChunk is one parsed SSE chunk of a chat-completion stream.
// It decodes into the standard completion response fields while also keeping
// the raw JSON payload so it can be re-emitted verbatim downstream.
// NOTE(review): "Steam" looks like a typo for "Stream"; renaming would touch
// every usage, so it is only flagged here.
type ChatCompSteamChunk struct {
	types.StdCompApiResp
	// RAW holds the original JSON bytes of the chunk (never re-serialized).
	RAW string `json:"-"`
}
+
// ApiRespStreamChunk pairs an SSE event name with its decoded data payload.
type ApiRespStreamChunk struct {
	Event string             `json:"event"`
	Data  ChatCompSteamChunk `json:"data"`
}
+
// myStreamDecoder is a minimal ssestream.Decoder implementation that scans an
// SSE byte stream line by line and yields one event per "data:" line.
type myStreamDecoder struct {
	evt    ssestream.Event // most recently decoded event (returned by Event)
	rc     io.ReadCloser   // underlying stream; released by Close
	scn    *bufio.Scanner  // line scanner over rc
	err    error           // first error encountered, if any
	closed bool            // set by Close
	// pendingEvent carries an "event:" name until its matching "data:" line
	// arrives (handles events split across multiple lines).
	pendingEvent string
}
+
+func (r *ChatCompSteamChunk) UnmarshalJSON(data []byte) (err error) {
+	r.RAW = string(data)
+	type Alias ChatCompSteamChunk
+	return json.Unmarshal(data, (*Alias)(r))
+}
+
+func ApiRespStreamDecoder(res any) ssestream.Decoder {
+	var rc io.ReadCloser
+	switch v := res.(type) {
+	case *http.Response:
+		rc = v.Body
+	case []byte:
+		rc = io.NopCloser(bytes.NewReader(v))
+	case string:
+		rc = io.NopCloser(bytes.NewReader([]byte(v)))
+	default:
+		rc = io.NopCloser(strings.NewReader(""))
+	}
+	return &myStreamDecoder{rc: rc, scn: bufio.NewScanner(rc)}
+}
+
// Event returns the event most recently decoded by Next.
func (s *myStreamDecoder) Event() ssestream.Event {
	return s.evt
}
+
+func (s *myStreamDecoder) Close() error {
+
+	s.closed = true
+	if closer, ok := s.rc.(io.Closer); ok {
+		return closer.Close()
+	}
+	return nil
+}
+
// Err reports the first error encountered while decoding, if any.
func (s *myStreamDecoder) Err() error {
	return s.err
}
+
+func (s *myStreamDecoder) Next() bool {
+	if s.err != nil {
+		return false
+	}
+	eventType := ""
+	dataBuffer := bytes.NewBuffer(nil)
+
+	for s.scn.Scan() {
+		line := strings.TrimSpace(s.scn.Text())
+		if len(line) == 0 {
+			continue //跳过空行
+		}
+
+		// 处理事件类型行
+		if strings.HasPrefix(line, "event:") {
+			s.pendingEvent = strings.TrimSpace(line[len("event:"):])
+			continue
+		}
+
+		// 处理数据行
+		if strings.HasPrefix(line, "data:") {
+			tmpdata := strings.TrimSpace(line[len("data:"):])
+
+			//确定事件类型
+			if s.pendingEvent != "" {
+				eventType = s.pendingEvent
+				s.pendingEvent = ""
+			} else {
+				eventType = "answer" // 默认类型
+			}
+
+			_, s.err = dataBuffer.WriteString(tmpdata)
+			break
+		}
+		//忽略无法识别的行
+	}
+	if dataBuffer.Len() > 0 {
+		s.evt = ssestream.Event{
+			Type: eventType,
+			Data: dataBuffer.Bytes(),
+		}
+		return true
+	}
+	if err := s.scn.Err(); err != nil && !errors.Is(err, io.EOF) {
+		s.err = s.scn.Err()
+	}
+	return false
+}

+ 10 - 0
internal/utils/compapi/config.go

@@ -0,0 +1,10 @@
+package compapi
+
+import (
+	"github.com/openai/openai-go"
+)
+
// Chat model identifiers accepted by the DeepSeek OpenAI-compatible API.
const (
	ChatModelDeepSeekV3 openai.ChatModel = "deepseek-chat"     // DeepSeek-V3 general chat model
	ChatModelDeepSeekR1 openai.ChatModel = "deepseek-reasoner" // DeepSeek-R1 reasoning model
)

+ 183 - 0
internal/utils/compapi/func.go

@@ -0,0 +1,183 @@
+package compapi
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"wechat-api/internal/types"
+	"wechat-api/internal/utils/contextkey"
+
+	openai "github.com/openai/openai-go"
+	"github.com/openai/openai-go/option"
+	"github.com/openai/openai-go/packages/ssestream"
+	"github.com/zeromicro/go-zero/rest/httpx"
+)
+
// NewFastgptClient builds an OpenAI-compatible client pointed at the FastGPT
// gateway, authenticated with apiKey.
// NOTE(review): the base URL is hard-coded and uses plain http; consider
// moving it to configuration.
func NewFastgptClient(apiKey string) *openai.Client {
	return openai.NewClient(option.WithAPIKey(apiKey),
		option.WithBaseURL("http://fastgpt.ascrm.cn/api/v1/"))
}
+
// NewDeepSeekClient builds an OpenAI-compatible client pointed at the public
// DeepSeek API, authenticated with apiKey.
func NewDeepSeekClient(apiKey string) *openai.Client {
	return openai.NewClient(option.WithAPIKey(apiKey),
		option.WithBaseURL("https://api.deepseek.com"))
}
+
+func DoChatCompletions(ctx context.Context, client *openai.Client, chatInfo *types.CompApiReq) (*types.CompOpenApiResp, error) {
+
+	var (
+		jsonBytes []byte
+		err       error
+	)
+	emptyParams := openai.ChatCompletionNewParams{}
+	if jsonBytes, err = json.Marshal(chatInfo); err != nil {
+		return nil, err
+	}
+	customResp := types.CompOpenApiResp{}
+	reqBodyOps := option.WithRequestBody("application/json", jsonBytes)
+	respBodyOps := option.WithResponseBodyInto(&customResp)
+	if _, err = client.Chat.Completions.New(ctx, emptyParams, reqBodyOps, respBodyOps); err != nil {
+		return nil, err
+	}
+	return &customResp, nil
+
+}
+
+func DoChatCompletionsStream(ctx context.Context, client *openai.Client, chatInfo *types.CompApiReq) (res *types.CompOpenApiResp, err error) {
+
+	var (
+		jsonBytes []byte
+		raw       *http.Response
+		//raw []byte
+		ok bool
+		hw http.ResponseWriter
+	)
+
+	hw, ok = contextkey.HttpResponseWriterKey.GetValue(ctx) //context取出http.ResponseWriter
+	if !ok {
+		return nil, errors.New("content get http writer err")
+	}
+	flusher, ok := (hw).(http.Flusher)
+	if !ok {
+		http.Error(hw, "Streaming unsupported!", http.StatusInternalServerError)
+	}
+
+	emptyParams := openai.ChatCompletionNewParams{}
+	if jsonBytes, err = json.Marshal(chatInfo); err != nil {
+		return nil, err
+	}
+	reqBodyOps := option.WithRequestBody("application/json", jsonBytes)
+	respBodyOps := option.WithResponseBodyInto(&raw)
+	if _, err = client.Chat.Completions.New(ctx, emptyParams, reqBodyOps, respBodyOps, option.WithJSONSet("stream", true)); err != nil {
+		return nil, err
+	}
+
+	//设置流式输出头 http1.1
+	hw.Header().Set("Content-Type", "text/event-stream;charset=utf-8")
+	hw.Header().Set("Connection", "keep-alive")
+	hw.Header().Set("Cache-Control", "no-cache")
+
+	chatStream := ssestream.NewStream[ApiRespStreamChunk](ApiRespStreamDecoder(raw), err)
+	defer chatStream.Close()
+	for chatStream.Next() {
+		chunk := chatStream.Current()
+		fmt.Fprintf(hw, "event:%s\ndata:%s\n\n", chunk.Event, chunk.Data.RAW)
+		//time.Sleep(1 * time.Millisecond)
+	}
+	fmt.Fprintf(hw, "event:%s\ndata:%s\n\n", "answer", "[DONE]")
+	flusher.Flush()
+	httpx.Ok(hw)
+
+	return nil, nil
+}
+
+func NewChatCompletions(ctx context.Context, client *openai.Client, chatInfo *types.CompApiReq) (*types.CompOpenApiResp, error) {
+	if chatInfo.Stream {
+		return DoChatCompletionsStream(ctx, client, chatInfo)
+	} else {
+		return DoChatCompletions(ctx, client, chatInfo)
+	}
+}
+
+func NewFastgptChatCompletions(ctx context.Context, apiKey string, chatInfo *types.CompApiReq) (*types.CompOpenApiResp, error) {
+
+	client := NewFastgptClient(apiKey)
+	return NewChatCompletions(ctx, client, chatInfo)
+}
+
+func NewDeepSeekChatCompletions(ctx context.Context, apiKey string, chatInfo *types.CompApiReq, chatModel openai.ChatModel) (res *types.CompOpenApiResp, err error) {
+	client := NewDeepSeekClient(apiKey)
+	if chatModel != ChatModelDeepSeekV3 {
+		chatModel = ChatModelDeepSeekR1
+	}
+	chatInfo.Model = chatModel
+	return NewChatCompletions(ctx, client, chatInfo)
+}
+
// DoChatCompletionsStreamOld is an earlier streaming prototype kept for
// reference: it pulls the ResponseWriter from a raw "HttpResp-Writer" context
// key, prints every chunk for debugging, and never flushes the writer.
//
// Deprecated: use DoChatCompletionsStream instead.
func DoChatCompletionsStreamOld(ctx context.Context, client *openai.Client, chatInfo *types.CompApiReq) (res *types.CompOpenApiResp, err error) {
	var (
		jsonBytes []byte
	)
	emptyParams := openai.ChatCompletionNewParams{}
	if jsonBytes, err = json.Marshal(chatInfo); err != nil {
		return nil, err
	}

	reqBodyOps := option.WithRequestBody("application/json", jsonBytes)
	//customResp := types.CompOpenApiResp{}
	//respBodyOps := option.WithResponseBodyInto(&customResp)
	//chatStream := client.Chat.Completions.NewStreaming(ctx, emptyParams, reqBodyOps, respBodyOps)
	chatStream := client.Chat.Completions.NewStreaming(ctx, emptyParams, reqBodyOps)

	// optionally, an accumulator helper can be used
	acc := openai.ChatCompletionAccumulator{}

	// NOTE(review): uses a bare string context key instead of the typed
	// contextkey.HttpResponseWriterKey used by the current implementation.
	httpWriter, ok := ctx.Value("HttpResp-Writer").(http.ResponseWriter)
	if !ok {
		return nil, errors.New("content get writer err")
	}

	//httpWriter.Header().Set("Content-Type", "text/event-stream;charset=utf-8")
	//httpWriter.Header().Set("Connection", "keep-alive")
	//httpWriter.Header().Set("Cache-Control", "no-cache")

	idx := 0
	for chatStream.Next() {
		chunk := chatStream.Current()
		acc.AddChunk(chunk)

		fmt.Printf("=====>get %d chunk:%v\n", idx, chunk)
		if _, err := fmt.Fprintf(httpWriter, "%v", chunk); err != nil {
			fmt.Printf("Error writing to client:%v \n", err)
			break
		}

		if content, ok := acc.JustFinishedContent(); ok {
			println("Content stream finished:", content)
		}

		// if using tool calls
		if tool, ok := acc.JustFinishedToolCall(); ok {
			println("Tool call stream finished:", tool.Index, tool.Name, tool.Arguments)
		}

		if refusal, ok := acc.JustFinishedRefusal(); ok {
			println("Refusal stream finished:", refusal)
		}

		// it's best to use chunks after handling JustFinished events
		if len(chunk.Choices) > 0 {
			idx++
			fmt.Printf("idx:%d get =>'%s'\n", idx, chunk.Choices[0].Delta.Content)
		}

	}

	if err := chatStream.Err(); err != nil {
		return nil, err
	}
	return nil, nil
}

+ 9 - 0
internal/utils/contextkey/config.go

@@ -0,0 +1,9 @@
+package contextkey
+
+import "net/http"
+
// Package-wide context keys (typically initialized in a var block or init).
var (
	HttpResponseWriterKey = NewCtxKey[http.ResponseWriter]() // carries the http.ResponseWriter
	OpenapiTokenKey       = NewCtxKey[string]()              // carries the openai token
)

+ 35 - 0
internal/utils/contextkey/func.go

@@ -0,0 +1,35 @@
+package contextkey
+
+import (
+	"context"
+)
+
+// Key 泛型键对象,封装特定类型的上下文存取
+type CtxKey[T any] struct {
+	key any // 实际存储的唯一键(通过空结构体保证类型唯一)
+}
+
+// NewKey 创建管理特定类型值的键对象(每个调用生成唯一键)
+func NewCtxKey[T any]() CtxKey[T] {
+	type uniqueKey struct{} // 闭包内部类型确保唯一性
+	return CtxKey[T]{key: uniqueKey{}}
+}
+
+// WithValue 将值存入context,返回新context
+func (k CtxKey[T]) WithValue(ctx context.Context, value T) context.Context {
+	//fmt.Printf("WithValue=====>key:%v|addr:%p\n", k.key, &k.key)
+	return context.WithValue(ctx, k.key, value)
+}
+
+// GetValue 从context中提取值(带类型安全校验)
+func (k CtxKey[T]) GetValue(ctx context.Context) (T, bool) {
+	//fmt.Printf("GetValue#1=====>key:%v|addr:%p\n", k.key, &k.key)
+	v := ctx.Value(k.key)
+	if v == nil {
+		var zero T
+		return zero, false
+	}
+	//fmt.Printf("GetValue#2=====>key:%v|addr:%p\n", k.key, &k.key)
+	t, ok := v.(T)
+	return t, ok
+}