import "../base.api"

type (
	// ===== API request types =====

	// CompApiReq is the full completions request: workflow-control fields
	// plus the standard request plus FastGPT-specific fields.
	CompApiReq {
		CompCtlReq
		StdCompApiReq
		FastGptSpecReq
	}
	// FastGptApiReq is the FastGPT completions request payload.
	FastGptApiReq {
		StdCompApiReq
		FastGptSpecReq
	}
	// StdCompApiReq is the standard (OpenAI-style) completions request.
	StdCompApiReq {
		// Model name, like "gpt-4o".
		Model string `json:"model,optional"`
		// Messages is the chat message list.
		Messages []StdCompMessage `json:"messages"`
		// Stream toggles streaming output (SSE) vs. a single response.
		Stream bool `json:"stream,default=false"`
	}
	// CompCtlReq carries workflow-control settings for the request.
	CompCtlReq {
		// EventType is the event type; defaults to "fastgpt".
		EventType string `json:"event_type,default=fastgpt"`
		// WorkId is the workflow ID.
		WorkId string `json:"work_id"`
		// IsBatch selects asynchronous (true) or synchronous (false, the
		// default) processing.
		IsBatch bool `json:"is_batch,default=false"`
		// Callback is the callback URL used for asynchronous requests.
		Callback string `json:"callback,optional"`
	}
	// FastGptSpecReq holds FastGPT-specific request fields.
	FastGptSpecReq {
		// ChatId of the conversation on this service's side.
		ChatId string `json:"chat_id,optional"`
		// FastgptChatId is FastGPT's own chat identifier (wire name "chatId").
		FastgptChatId string `json:"chatId,optional"`
		// ResponseChatItemId identifies the response chat item.
		ResponseChatItemId string `json:"response_chat_item_id,optional"`
		// Detail: whether FastGPT should return detailed information.
		Detail bool `json:"detail,default=false"`
		// Variables passed through to the workflow.
		Variables map[string]string `json:"variables,optional"`
	}
	// StdCompMessage is a single chat message.
	StdCompMessage {
		Role string `json:"role"`
		Content string `json:"content"`
	}

	// ===== API response types =====

	// CompOpenApiResp combines the standard completion response with
	// FastGPT-specific response fields.
	CompOpenApiResp {
		StdCompApiResp
		FastgptSpecResp
	}
	// StdCompApiResp mirrors the OpenAI chat-completion response object.
	StdCompApiResp {
		// A unique identifier for the chat completion.
		ID string `json:"id"`
		// A list of chat completion choices. Can be more than one if `n` is greater
		// than 1.
		Choices []ChatCompletionChoice `json:"choices"`
		// The Unix timestamp (in seconds) of when the chat completion was created.
		Created int64 `json:"created"`
		// The model used for the chat completion.
		Model string `json:"model"`
		// The object type, which is always `chat.completion`.
		Object string `json:"object"`
		// The service tier used for processing the request.
		ServiceTier string `json:"service_tier,omitempty"`
		// This fingerprint represents the backend configuration that the model runs with.
		//
		// Can be used in conjunction with the `seed` request parameter to understand when
		// backend changes have been made that might impact determinism.
		SystemFingerprint string `json:"system_fingerprint"`
		// Usage statistics for the completion request.
		Usage CompletionUsage `json:"usage,omitempty"`
	}
	// FastgptSpecResp holds FastGPT-specific response fields.
	FastgptSpecResp {
		ResponseData []map[string]any `json:"responseData,omitempty"`
		NewVariables map[string]any `json:"newVariables,omitempty"`
	}
	// ChatCompletionAudio describes an audio response from the model.
	ChatCompletionAudio {
		// Unique identifier for this audio response.
		ID string `json:"id"`
		// TODO: remaining audio fields are not yet modeled.
	}
	// ChatCompletionMessage is a message generated by the model.
	ChatCompletionMessage {
		// The contents of the message.
		Content string `json:"content"`
		// The contents of the reasoning message.
		ReasoningContent string `json:"reasoning_content,omitempty"`
		// The refusal message generated by the model.
		Refusal string `json:"refusal"`
		// The role of the author of this message.
		Role string `json:"role"`
		// If the audio output modality is requested, this object contains data about the
		// audio response from the model.
		// [Learn more](https://platform.openai.com/docs/guides/audio).
		Audio ChatCompletionAudio `json:"audio,omitempty"`
	}
	// ChatCompletionChoice is one completion choice returned by the model.
	ChatCompletionChoice {
		// The reason the model stopped generating tokens. This will be `stop` if the model
		// hit a natural stop point or a provided stop sequence, `length` if the maximum
		// number of tokens specified in the request was reached, `content_filter` if
		// content was omitted due to a flag from our content filters, `tool_calls` if the
		// model called a tool, or `function_call` (deprecated) if the model called a
		// function.
		FinishReason string `json:"finish_reason"`
		// The index of the choice in the list of choices.
		Index int64 `json:"index"`
		// A chat completion message generated by the model.
		Message ChatCompletionMessage `json:"message,omitempty"`
		// A chat completion message generated by the model in stream mode.
		Delta ChatCompletionMessage `json:"delta,omitempty"`
	}
	// CompletionUsageCompletionTokensDetails breaks down completion tokens.
	CompletionUsageCompletionTokensDetails {
		// When using Predicted Outputs, the number of tokens in the prediction that
		// appeared in the completion.
		AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
		// Audio output tokens generated by the model.
		AudioTokens int64 `json:"audio_tokens"`
		// Tokens generated by the model for reasoning.
		ReasoningTokens int64 `json:"reasoning_tokens"`
		// When using Predicted Outputs, the number of tokens in the prediction that did
		// not appear in the completion. However, like reasoning tokens, these tokens are
		// still counted in the total completion tokens for purposes of billing, output,
		// and context window limits.
		RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
	}
	// CompletionUsagePromptTokensDetails breaks down prompt tokens.
	CompletionUsagePromptTokensDetails {
		// Audio input tokens present in the prompt.
		AudioTokens int64 `json:"audio_tokens"`
		// Cached tokens present in the prompt.
		CachedTokens int64 `json:"cached_tokens"`
	}
	// CompletionUsage reports token usage for the request.
	// NOTE(review): go-zero treats fields without the "optional" tag option as
	// required by default; confirm the api parser accepts ",required" here.
	CompletionUsage {
		// Number of tokens in the generated completion.
		CompletionTokens int64 `json:"completion_tokens,required"`
		// Number of tokens in the prompt.
		PromptTokens int64 `json:"prompt_tokens,required"`
		// Total number of tokens used in the request (prompt + completion).
		TotalTokens int64 `json:"total_tokens,required"`
		// Breakdown of tokens used in a completion.
		CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
		// Breakdown of tokens used in the prompt.
		PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
	}
)
  148. @server(
  149. group: chat
  150. prefix: /v1
  151. )
  152. service Wechat {
  153. @handler getAuth
  154. get /chat/getauth () returns (BaseMsgResp)
  155. }
  156. @server(
  157. group: chat
  158. prefix: /v1
  159. //jwt: Auth
  160. middleware: OpenAuthority
  161. )
  162. service Wechat {
  163. @handler chatCompletions
  164. post /chat/completions (CompApiReq) returns (CompOpenApiResp)
  165. }