chat.api

import "../base.api"

type (
    // Request body for the add_friend_by_phone API.
    AddFriendByPhoneReq {
        Type     int    `json:"type"`
        WeChatId string `json:"WeChatId"`
        Phone    string `json:"phone"`
        Message  string `json:"message"`
    }

    // The following are the API request types.
    CompApiReq {
        CompCtlReq
        StdCompApiReq
        FastGptSpecReq
    }

    // FastGpt completions request.
    FastGptApiReq {
        StdCompApiReq
        FastGptSpecReq
    }

    // Standard completions request.
    StdCompApiReq {
        // Model name, e.g. 'gpt-4o'.
        Model string `json:"model,optional"`
        // Message list.
        Messages []StdCompMessage `json:"messages"`
        // Stream: whether to stream the output.
        Stream bool `json:"stream,default=false"`
        // Structured output format definition.
        ResponseFormat interface{} `json:"response_format,omitempty"`
    }

    // Workflow control options for the request.
    CompCtlReq {
        // EventType: the event type.
        EventType string `json:"event_type,default=fastgpt"`
        // WorkId: the workflow ID.
        WorkId string `json:"work_id,optional,omitempty"`
        // IsBatch: whether the call is synchronous or asynchronous; false (the default) means synchronous.
        IsBatch bool `json:"is_batch,default=false"`
        // Callback URL for asynchronous calls.
        Callback string `json:"callback,optional,omitempty"`
    }

    FastGptSpecReq {
        // ChatId
        ChatId string `json:"chat_id,optional,omitempty"`
        // FastgptChatId
        FastgptChatId string `json:"chatId,optional,omitempty"`
        // ResponseChatItemId
        ResponseChatItemId string `json:"response_chat_item_id,optional,omitempty"`
        // Detail: toggle for detailed output.
        Detail bool `json:"detail,default=false"`
        // Variables
        Variables map[string]string `json:"variables,optional,omitempty"`
    }

    StdCompMessage {
        Role    string      `json:"role"`
        Content interface{} `json:"content"`
        //Content string `json:"content"`
    }
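
    // Illustrative JSON body for a CompApiReq (a sketch only: it assumes the embedded
    // CompCtlReq/StdCompApiReq/FastGptSpecReq fields are flattened into one object,
    // and the concrete values such as the model name and chat_id are placeholders):
    // {
    //   "event_type": "fastgpt",
    //   "is_batch": false,
    //   "model": "gpt-4o",
    //   "stream": false,
    //   "messages": [
    //     {"role": "user", "content": "Hello"}
    //   ],
    //   "chat_id": "chat-123",
    //   "detail": false
    // }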

    // The following are the API response types.
    CompOpenApiResp {
        StdCompApiResp
        FastgptSpecResp
        FastgptErrResp
    }

    StdCompApiResp {
        // A unique identifier for the chat completion.
        ID string `json:"id"`
        // A list of chat completion choices. Can be more than one if `n` is greater
        // than 1.
        Choices []ChatCompletionChoice `json:"choices"`
        // The Unix timestamp (in seconds) of when the chat completion was created.
        Created int64 `json:"created"`
        // The model used for the chat completion.
        Model string `json:"model"`
        // The object type, which is always `chat.completion`.
        Object string `json:"object"`
        // The service tier used for processing the request.
        ServiceTier string `json:"service_tier,omitempty"`
        // This fingerprint represents the backend configuration that the model runs with.
        //
        // Can be used in conjunction with the `seed` request parameter to understand when
        // backend changes have been made that might impact determinism.
        SystemFingerprint string `json:"system_fingerprint"`
        // Usage statistics for the completion request.
        Usage CompletionUsage `json:"usage,omitempty"`
    }

    FastgptSpecResp {
        ResponseData []map[string]interface{} `json:"responseData,omitempty"`
        NewVariables map[string]interface{}   `json:"newVariables,omitempty"`
    }

    FastgptErrResp {
        FgtErrCode      *int    `json:"code,omitempty"`
        FgtErrStatusTxt *string `json:"statusText,omitempty"`
        FgtErrMessage   *string `json:"message,omitempty"`
    }

    DeepseekErrResp {
        DSErr DeepseekErrInfo `json:"error,omitempty"`
    }

    DeepseekErrInfo {
        Message string      `json:"message,omitempty"`
        Type    string      `json:"type,omitempty"`
        Code    string      `json:"code,omitempty"`
        Param   interface{} `json:"param,omitempty"`
    }

    ChatCompletionAudio {
        // Unique identifier for this audio response.
        ID string `json:"id"`
        //TODO
    }

    ChatCompletionMessage {
        // The contents of the message.
        Content string `json:"content"`
        // The contents of the reasoning message.
        ReasoningContent string `json:"reasoning_content,omitempty"`
        // The refusal message generated by the model.
        Refusal string `json:"refusal"`
        // The role of the author of this message.
        Role string `json:"role"`
        // If the audio output modality is requested, this object contains data about the
        // audio response from the model.
        // [Learn more](https://platform.openai.com/docs/guides/audio).
        Audio ChatCompletionAudio `json:"audio,omitempty"`
    }

    ChatCompletionChoice {
        // The reason the model stopped generating tokens. This will be `stop` if the model
        // hit a natural stop point or a provided stop sequence, `length` if the maximum
        // number of tokens specified in the request was reached, `content_filter` if
        // content was omitted due to a flag from our content filters, `tool_calls` if the
        // model called a tool, or `function_call` (deprecated) if the model called a
        // function.
        FinishReason string `json:"finish_reason"`
        // The index of the choice in the list of choices.
        Index int64 `json:"index"`
        // A chat completion message generated by the model (non-stream mode).
        Message ChatCompletionMessage `json:"message,omitempty"`
        // A chat completion message delta generated by the model in stream mode.
        Delta ChatCompletionMessage `json:"delta,omitempty"`
        // Log probability information for the choice.
        Logprobs string `json:"logprobs"`
    }

    CompletionUsageCompletionTokensDetails {
        // When using Predicted Outputs, the number of tokens in the prediction that
        // appeared in the completion.
        AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
        // Audio input tokens generated by the model.
        AudioTokens int64 `json:"audio_tokens"`
        // Tokens generated by the model for reasoning.
        ReasoningTokens int64 `json:"reasoning_tokens"`
        // When using Predicted Outputs, the number of tokens in the prediction that did
        // not appear in the completion. However, like reasoning tokens, these tokens are
        // still counted in the total completion tokens for purposes of billing, output,
        // and context window limits.
        RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
    }

    CompletionUsagePromptTokensDetails {
        // Audio input tokens present in the prompt.
        AudioTokens int64 `json:"audio_tokens"`
        // Cached tokens present in the prompt.
        CachedTokens int64 `json:"cached_tokens"`
    }

    CompletionUsage {
        // Number of tokens in the generated completion.
        CompletionTokens int64 `json:"completion_tokens,required"`
        // Number of tokens in the prompt.
        PromptTokens int64 `json:"prompt_tokens,required"`
        // Total number of tokens used in the request (prompt + completion).
        TotalTokens int64 `json:"total_tokens,required"`
        // Breakdown of tokens used in a completion.
        CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
        // Breakdown of tokens used in the prompt.
        PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
    }
)
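
// Illustrative non-stream CompOpenApiResp payload (a sketch assuming the embedded
// response types are flattened into one JSON object; all values are placeholders):
// {
//   "id": "chatcmpl-abc123",
//   "object": "chat.completion",
//   "created": 1717000000,
//   "model": "gpt-4o",
//   "choices": [
//     {
//       "index": 0,
//       "finish_reason": "stop",
//       "message": {"role": "assistant", "content": "Hi there!"}
//     }
//   ],
//   "usage": {"prompt_tokens": 5, "completion_tokens": 3, "total_tokens": 8}
// }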

@server(
    group: chat
    prefix: /v1
)
service Wechat {
    @handler getAuth
    get /chat/getauth () returns (BaseMsgResp)
}

@server(
    group: chat
    prefix: /v1
    //jwt: Auth
    middleware: OpenAuthority
)
service Wechat {
    @handler chatCompletions
    post /chat/completions (CompApiReq) returns (CompOpenApiResp)
}
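
// Example call to the completions route (a sketch: the host and the credential
// expected by the OpenAuthority middleware are assumptions, not defined in this file):
// curl -X POST https://<host>/v1/chat/completions \
//   -H "Content-Type: application/json" \
//   -H "Authorization: Bearer <api-key>" \
//   -d '{"event_type":"fastgpt","messages":[{"role":"user","content":"Hello"}]}'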

@server(
    group: chat
    middleware: OpenAuthority
)
service Wechat {
    @handler sendTextMsg
    post /wx/sendTextMsg (SendTextMsgReq) returns (BaseMsgResp)

    @handler AddFriendByPhone
    post /wx/add_friend_by_phone (AddFriendByPhoneReq) returns (BaseMsgResp)
}
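
// Illustrative JSON body for AddFriendByPhoneReq on /wx/add_friend_by_phone
// (all values are placeholders, not taken from this spec):
// {
//   "type": 1,
//   "WeChatId": "wxid_example",
//   "phone": "13800000000",
//   "message": "Hi, nice to meet you."
// }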