// chat.api — go-zero API definitions for the chat completions service.
import "../base.api"
  2. type (
  3. //以下是API请求类型
  4. CompApiReq {
  5. CompCtlReq
  6. StdCompApiReq
  7. FastGptSpecReq
  8. }
  9. //FastGpt Completions请求信息
  10. FastGptApiReq {
  11. StdCompApiReq
  12. FastGptSpecReq
  13. }
  14. //标准Completions请求信息
  15. StdCompApiReq {
  16. //model,like 'gpt-4o'
  17. Model string `json:"model,optional"`
  18. //Message list
  19. Messages []StdCompMessage `json:"messages"`
  20. //Stream 是否流式输出
  21. Stream bool `json:"stream,default=false"`
  22. //格式化输出定义
  23. ResponseFormat interface{} `json:"response_format,omitempty"`
  24. }
  25. //关于工作流配置的请求信息
  26. CompCtlReq {
  27. //EventType事件类型
  28. EventType string `json:"event_type,default=fastgpt"`
  29. //WorkId工作流ID
  30. WorkId string `json:"work_id,optional,omitempty"`
  31. //IsBatch 是同步还是异步,默认及取值false表明同步
  32. IsBatch bool `json:"is_batch,default=false"`
  33. //异步回调地址
  34. Callback string `json:"callback,optional,omitempty"`
  35. }
  36. FastGptSpecReq {
  37. //ChatId
  38. ChatId string `json:"chat_id,optional,omitempty"`
  39. //FastgptChatId
  40. FastgptChatId string `json:"chatId,optional,omitempty"`
  41. //ResponseChatItemId
  42. ResponseChatItemId string `json:"response_chat_item_id,optional,omitempty"`
  43. //Detail 详情开关
  44. Detail bool `json:"detail,default=false"`
  45. //Variables
  46. Variables map[string]string `json:"variables,optional,omitempty"`
  47. }
  48. StdCompMessage {
  49. Role string `json:"role"`
  50. Content interface{} `json:"content"`
  51. //Content string `json:"content"`
  52. }
  53. //以下是API响应类型
  54. CompOpenApiResp {
  55. StdCompApiResp
  56. FastgptSpecResp
  57. FastgptErrResp
  58. }
  59. StdCompApiResp {
  60. // A unique identifier for the chat completion.
  61. ID string `json:"id"`
  62. // A list of chat completion choices. Can be more than one if `n` is greater
  63. // than 1.
  64. Choices []ChatCompletionChoice `json:"choices"`
  65. // The Unix timestamp (in seconds) of when the chat completion was created.
  66. Created int64 `json:"created"`
  67. // The model used for the chat completion.
  68. Model string `json:"model"`
  69. // The object type, which is always `chat.completion`.
  70. Object string `json:"object"`
  71. // The service tier used for processing the request.
  72. ServiceTier string `json:"service_tier,omitempty"`
  73. // This fingerprint represents the backend configuration that the model runs with.
  74. //
  75. // Can be used in conjunction with the `seed` request parameter to understand when
  76. // backend changes have been made that might impact determinism.
  77. SystemFingerprint string `json:"system_fingerprint"`
  78. // Usage statistics for the completion request.
  79. Usage CompletionUsage `json:"usage,omitempty"`
  80. }
  81. FastgptSpecResp {
  82. ResponseData []map[string]interface{} `json:"responseData,omitempty"`
  83. NewVariables map[string]interface{} `json:"newVariables,omitempty"`
  84. }
  85. FastgptErrResp {
  86. FgtErrCode *int `json:"code,omitempty"`
  87. FgtErrStatusTxt *string `json:"statusText,omitempty"`
  88. FgtErrMessage *string `json:"message,omitempty"`
  89. }
  90. DeepseekErrResp {
  91. DSErr DeepseekErrInfo `json:"error,omitempty"`
  92. }
  93. DeepseekErrInfo {
  94. Message string `json:"message,omitempty"`
  95. Type string `json:"type,omitempty"`
  96. Code string `json:"code,omitempty"`
  97. Param interface{} `json:"param,omitempty"`
  98. }
  99. ChatCompletionAudio {
  100. // Unique identifier for this audio response.
  101. ID string `json:"id"`
  102. //TODO
  103. }
  104. ChatCompletionMessage {
  105. // The contents of the message.
  106. Content string `json:"content"`
  107. //The contents of the reasoning message
  108. ReasoningContent string `json:"reasoning_content,omitempty"`
  109. // The refusal message generated by the model.
  110. Refusal string `json:"refusal"`
  111. // The role of the author of this message.
  112. Role string `json:"role"`
  113. // If the audio output modality is requested, this object contains data about the
  114. // audio response from the model.
  115. // [Learn more](https://platform.openai.com/docs/guides/audio).
  116. Audio ChatCompletionAudio `json:"audio,omitempty"`
  117. }
  118. ChatCompletionChoice {
  119. // The reason the model stopped generating tokens. This will be `stop` if the model
  120. // hit a natural stop point or a provided stop sequence, `length` if the maximum
  121. // number of tokens specified in the request was reached, `content_filter` if
  122. // content was omitted due to a flag from our content filters, `tool_calls` if the
  123. // model called a tool, or `function_call` (deprecated) if the model called a
  124. // function.
  125. FinishReason string `json:"finish_reason"`
  126. // The index of the choice in the list of choices.
  127. Index int64 `json:"index"`
  128. // A chat completion message generated by the model.
  129. Message ChatCompletionMessage `json:"message,omitempty"`
  130. // A chat completion message generated by the model stream mode.
  131. Delta ChatCompletionMessage `json:"delta,omitempty"`
  132. //
  133. Logprobs string `json:"logprobs"`
  134. }
  135. CompletionUsageCompletionTokensDetails {
  136. // When using Predicted Outputs, the number of tokens in the prediction that
  137. // appeared in the completion.
  138. AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
  139. // Audio input tokens generated by the model.
  140. AudioTokens int64 `json:"audio_tokens"`
  141. // Tokens generated by the model for reasoning.
  142. ReasoningTokens int64 `json:"reasoning_tokens"`
  143. // When using Predicted Outputs, the number of tokens in the prediction that did
  144. // not appear in the completion. However, like reasoning tokens, these tokens are
  145. // still counted in the total completion tokens for purposes of billing, output,
  146. // and context window limits.
  147. RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
  148. }
  149. CompletionUsagePromptTokensDetails {
  150. // Audio input tokens present in the prompt.
  151. AudioTokens int64 `json:"audio_tokens"`
  152. // Cached tokens present in the prompt.
  153. CachedTokens int64 `json:"cached_tokens"`
  154. }
  155. CompletionUsage {
  156. // Number of tokens in the generated completion.
  157. CompletionTokens int64 `json:"completion_tokens,required"`
  158. // Number of tokens in the prompt.
  159. PromptTokens int64 `json:"prompt_tokens,required"`
  160. // Total number of tokens used in the request (prompt + completion).
  161. TotalTokens int64 `json:"total_tokens,required"`
  162. // Breakdown of tokens used in a completion.
  163. CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
  164. // Breakdown of tokens used in the prompt.
  165. PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
  166. }
  167. )
  168. @server(
  169. group: chat
  170. prefix: /v1
  171. )
  172. service Wechat {
  173. @handler getAuth
  174. get /chat/getauth () returns (BaseMsgResp)
  175. }
  176. @server(
  177. group: chat
  178. prefix: /v1
  179. //jwt: Auth
  180. middleware: OpenAuthority
  181. )
  182. service Wechat {
  183. @handler chatCompletions
  184. post /chat/completions (CompApiReq) returns (CompOpenApiResp)
  185. }