// chat.api — chat/completions API definitions (go-zero API DSL).
  1. import "../base.api"
type (
	// AddFriendByPhoneReq is the request payload for the add_friend_by_phone API.
	AddFriendByPhoneReq {
		// Type of the add-friend action; allowed values are 1 or 3, default 1.
		Type int `json:"type,options=1|3,default=1"`
		WeChatIds []string `json:"wechat_ids,optional,omitempty"`
		Phones []string `json:"phones"`
		Message string `json:"message"`
		CallbackURL string `json:"callback_url,optional"`
	}

	// The API request types follow.

	// CompApiReq is the composite completions request: workflow control
	// options plus the standard request plus FastGPT-specific fields.
	CompApiReq {
		CompCtlReq
		StdCompApiReq
		FastGptSpecReq
	}

	// FastGptApiReq is the FastGPT completions request.
	FastGptApiReq {
		StdCompApiReq
		FastGptSpecReq
	}

	// StdCompApiReq is the standard (OpenAI-style) completions request.
	StdCompApiReq {
		// Model name, like 'gpt-4o'.
		Model string `json:"model,optional"`
		// Messages is the chat message list.
		Messages []StdCompMessage `json:"messages"`
		// Stream enables streaming output when true.
		Stream bool `json:"stream,default=false"`
		// ResponseFormat defines the structured-output format.
		ResponseFormat interface{} `json:"response_format,omitempty"`
	}

	// CompCtlReq carries workflow-control options for the request.
	CompCtlReq {
		// EventType is the event type; defaults to fastgpt.
		EventType string `json:"event_type,default=fastgpt"`
		// WorkId is the workflow ID.
		WorkId string `json:"work_id,optional,omitempty"`
		// IsBatch selects async (true) or synchronous (false, the default) handling.
		IsBatch bool `json:"is_batch,default=false"`
		// Callback is the async callback URL.
		Callback string `json:"callback,optional,omitempty"`
	}

	// FastGptSpecReq holds FastGPT-specific request fields.
	FastGptSpecReq {
		// ChatId is this service's chat identifier.
		ChatId string `json:"chat_id,optional,omitempty"`
		// FastgptChatId is FastGPT's own chat identifier (note the camelCase JSON key).
		FastgptChatId string `json:"chatId,optional,omitempty"`
		// ResponseChatItemId identifies the response chat item.
		ResponseChatItemId string `json:"response_chat_item_id,optional,omitempty"`
		// Detail toggles detailed response output.
		Detail bool `json:"detail,default=false"`
		// Variables are FastGPT template variables.
		Variables map[string]string `json:"variables,optional,omitempty"`
	}

	// StdCompMessage is one chat message.
	StdCompMessage {
		Role string `json:"role"`
		// Content may be a plain string or structured content parts.
		Content interface{} `json:"content"`
		//Content string `json:"content"`
	}

	// The API response types follow.

	// CompOpenApiResp is the composite completions response: the standard
	// response plus FastGPT-specific and FastGPT error fields.
	CompOpenApiResp {
		StdCompApiResp
		FastgptSpecResp
		FastgptErrResp
	}

	// StdCompApiResp mirrors the OpenAI chat-completion response object.
	StdCompApiResp {
		// A unique identifier for the chat completion.
		ID string `json:"id"`
		// A list of chat completion choices. Can be more than one if `n` is greater
		// than 1.
		Choices []ChatCompletionChoice `json:"choices"`
		// The Unix timestamp (in seconds) of when the chat completion was created.
		Created int64 `json:"created"`
		// The model used for the chat completion.
		Model string `json:"model"`
		// The object type, which is always `chat.completion`.
		Object string `json:"object"`
		// The service tier used for processing the request.
		ServiceTier string `json:"service_tier,omitempty"`
		// This fingerprint represents the backend configuration that the model runs with.
		//
		// Can be used in conjunction with the `seed` request parameter to understand when
		// backend changes have been made that might impact determinism.
		SystemFingerprint string `json:"system_fingerprint"`
		// Usage statistics for the completion request.
		Usage CompletionUsage `json:"usage,omitempty"`
	}

	// FastgptSpecResp carries FastGPT-specific response fields.
	FastgptSpecResp {
		ResponseData []map[string]interface{} `json:"responseData,omitempty"`
		NewVariables map[string]interface{} `json:"newVariables,omitempty"`
	}

	// FastgptErrResp carries FastGPT error fields; pointer types so an
	// absent field is distinguishable from a zero value.
	FastgptErrResp {
		FgtErrCode *int `json:"code,omitempty"`
		FgtErrStatusTxt *string `json:"statusText,omitempty"`
		FgtErrMessage *string `json:"message,omitempty"`
	}

	// DeepseekErrResp wraps a Deepseek error payload.
	DeepseekErrResp {
		DSErr DeepseekErrInfo `json:"error,omitempty"`
	}

	// DeepseekErrInfo describes a single Deepseek error.
	DeepseekErrInfo {
		Message string `json:"message,omitempty"`
		Type string `json:"type,omitempty"`
		Code string `json:"code,omitempty"`
		Param interface{} `json:"param,omitempty"`
	}

	// ChatCompletionAudio is the audio part of a completion response.
	ChatCompletionAudio {
		// Unique identifier for this audio response.
		ID string `json:"id"`
		// TODO: remaining audio fields are not yet modeled.
	}

	// ChatCompletionMessage is a message generated by the model.
	ChatCompletionMessage {
		// The contents of the message.
		Content string `json:"content"`
		// The contents of the reasoning message.
		ReasoningContent string `json:"reasoning_content,omitempty"`
		// The refusal message generated by the model.
		Refusal string `json:"refusal"`
		// The role of the author of this message.
		Role string `json:"role"`
		// If the audio output modality is requested, this object contains data about the
		// audio response from the model.
		// [Learn more](https://platform.openai.com/docs/guides/audio).
		Audio ChatCompletionAudio `json:"audio,omitempty"`
	}

	// ChatCompletionChoice is one choice in a completion response.
	ChatCompletionChoice {
		// The reason the model stopped generating tokens. This will be `stop` if the model
		// hit a natural stop point or a provided stop sequence, `length` if the maximum
		// number of tokens specified in the request was reached, `content_filter` if
		// content was omitted due to a flag from our content filters, `tool_calls` if the
		// model called a tool, or `function_call` (deprecated) if the model called a
		// function.
		FinishReason string `json:"finish_reason"`
		// The index of the choice in the list of choices.
		Index int64 `json:"index"`
		// A chat completion message generated by the model.
		Message ChatCompletionMessage `json:"message,omitempty"`
		// A chat completion message generated by the model in stream mode.
		Delta ChatCompletionMessage `json:"delta,omitempty"`
		// Log-probability information for the choice.
		Logprobs string `json:"logprobs"`
	}

	// CompletionUsageCompletionTokensDetails breaks down completion tokens.
	CompletionUsageCompletionTokensDetails {
		// When using Predicted Outputs, the number of tokens in the prediction that
		// appeared in the completion.
		AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
		// Audio input tokens generated by the model.
		AudioTokens int64 `json:"audio_tokens"`
		// Tokens generated by the model for reasoning.
		ReasoningTokens int64 `json:"reasoning_tokens"`
		// When using Predicted Outputs, the number of tokens in the prediction that did
		// not appear in the completion. However, like reasoning tokens, these tokens are
		// still counted in the total completion tokens for purposes of billing, output,
		// and context window limits.
		RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
	}

	// CompletionUsagePromptTokensDetails breaks down prompt tokens.
	CompletionUsagePromptTokensDetails {
		// Audio input tokens present in the prompt.
		AudioTokens int64 `json:"audio_tokens"`
		// Cached tokens present in the prompt.
		CachedTokens int64 `json:"cached_tokens"`
	}

	// CompletionUsage reports token usage for the request.
	CompletionUsage {
		// Number of tokens in the generated completion.
		CompletionTokens int64 `json:"completion_tokens,required"`
		// Number of tokens in the prompt.
		PromptTokens int64 `json:"prompt_tokens,required"`
		// Total number of tokens used in the request (prompt + completion).
		TotalTokens int64 `json:"total_tokens,required"`
		// Breakdown of tokens used in a completion.
		CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
		// Breakdown of tokens used in the prompt.
		PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
	}
)
// Public chat endpoints (no auth middleware on this group).
@server(
	group: chat
	prefix: /v1
)
service Wechat {
	// getAuth returns authorization info for the chat client.
	@handler getAuth
	get /chat/getauth () returns (BaseMsgResp)
}
// Completions endpoint, guarded by the OpenAuthority middleware.
@server(
	group: chat
	prefix: /v1
	//jwt: Auth
	middleware: OpenAuthority
)
service Wechat {
	// chatCompletions handles OpenAI-style chat completion requests.
	@handler chatCompletions
	post /chat/completions (CompApiReq) returns (CompOpenApiResp)
}
  194. @server(
  195. group: chat
  196. middleware: OpenAuthority
  197. )
  198. service Wechat {
  199. @handler sendTextMsg
  200. post /wx/sendTextMsg (SendTextMsgReq) returns (BaseMsgResp)
  201. @handler AddFriendByPhone
  202. post /wx/add_friend_by_phone (AddFriendByPhoneReq) returns (BaseMsgResp)
  203. }