// chat.api
  1. import "../base.api"
  2. type (
  3. //以下是API请求类型
  4. CompApiReq {
  5. CompCtlReq
  6. StdCompApiReq
  7. FastGptSpecReq
  8. }
  9. //FastGpt Completions请求信息
  10. FastGptApiReq {
  11. StdCompApiReq
  12. FastGptSpecReq
  13. }
  14. //标准Completions请求信息
  15. StdCompApiReq {
  16. //model,like 'gpt-4o'
  17. Model string `json:"model,optional"`
  18. //Message list
  19. Messages []StdCompMessage `json:"messages"`
  20. //Stream 是否流式输出
  21. Stream bool `json:"stream,default=false"`
  22. }
  23. //关于工作流配置的请求信息
  24. CompCtlReq {
  25. //EventType事件类型
  26. EventType string `json:"event_type,default=fastgpt"`
  27. //WorkId工作流ID
  28. WorkId string `json:"work_id"`
  29. //IsBatch 是同步还是异步,默认及取值false表明同步
  30. IsBatch bool `json:"is_batch,default=false"`
  31. //异步回调地址
  32. Callback string `json:"callback,optional"`
  33. }
  34. FastGptSpecReq {
  35. //ChatId
  36. ChatId string `json:"chat_id,optional"`
  37. //ResponseChatItemId
  38. ResponseChatItemId string `json:"response_chat_item_id,optional"`
  39. //Detail 详情开关
  40. Detail bool `json:"detail,default=false"`
  41. //Variables
  42. Variables map[string]string `json:"variables,optional"`
  43. }
  44. StdCompMessage {
  45. Role string `json:"role"`
  46. Content string `json:"content"`
  47. }
  48. //以下是API响应类型
  49. CompOpenApiResp {
  50. StdCompApiResp
  51. FastgptSpecResp
  52. }
  53. StdCompApiResp {
  54. // A unique identifier for the chat completion.
  55. ID string `json:"id"`
  56. // A list of chat completion choices. Can be more than one if `n` is greater
  57. // than 1.
  58. Choices []ChatCompletionChoice `json:"choices"`
  59. // The Unix timestamp (in seconds) of when the chat completion was created.
  60. Created int64 `json:"created"`
  61. // The model used for the chat completion.
  62. Model string `json:"model"`
  63. // The object type, which is always `chat.completion`.
  64. Object string `json:"object"`
  65. // The service tier used for processing the request.
  66. ServiceTier string `json:"service_tier,omitempty"`
  67. // This fingerprint represents the backend configuration that the model runs with.
  68. //
  69. // Can be used in conjunction with the `seed` request parameter to understand when
  70. // backend changes have been made that might impact determinism.
  71. SystemFingerprint string `json:"system_fingerprint"`
  72. // Usage statistics for the completion request.
  73. Usage CompletionUsage `json:"usage,omitempty"`
  74. }
  75. FastgptSpecResp {
  76. ResponseData []map[string]string `json:"responseData,omitempty"`
  77. NewVariables map[string]string `json:"newVariables,omitempty"`
  78. }
  79. ChatCompletionAudio {
  80. // Unique identifier for this audio response.
  81. ID string `json:"id"`
  82. //TODO
  83. }
  84. ChatCompletionMessage {
  85. // The contents of the message.
  86. Content string `json:"content"`
  87. //The contents of the reasoning message
  88. ReasoningContent string `json:"reasoning_content,omitempty"`
  89. // The refusal message generated by the model.
  90. Refusal string `json:"refusal"`
  91. // The role of the author of this message.
  92. Role string `json:"role"`
  93. // If the audio output modality is requested, this object contains data about the
  94. // audio response from the model.
  95. // [Learn more](https://platform.openai.com/docs/guides/audio).
  96. Audio ChatCompletionAudio `json:"audio,omitempty"`
  97. }
  98. ChatCompletionChoice {
  99. // The reason the model stopped generating tokens. This will be `stop` if the model
  100. // hit a natural stop point or a provided stop sequence, `length` if the maximum
  101. // number of tokens specified in the request was reached, `content_filter` if
  102. // content was omitted due to a flag from our content filters, `tool_calls` if the
  103. // model called a tool, or `function_call` (deprecated) if the model called a
  104. // function.
  105. FinishReason string `json:"finish_reason"`
  106. // The index of the choice in the list of choices.
  107. Index int64 `json:"index"`
  108. // A chat completion message generated by the model.
  109. Message ChatCompletionMessage `json:"message,omitempty"`
  110. // A chat completion message generated by the model stream mode.
  111. Delta ChatCompletionMessage `json:"delta,omitempty"`
  112. }
  113. CompletionUsageCompletionTokensDetails {
  114. // When using Predicted Outputs, the number of tokens in the prediction that
  115. // appeared in the completion.
  116. AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
  117. // Audio input tokens generated by the model.
  118. AudioTokens int64 `json:"audio_tokens"`
  119. // Tokens generated by the model for reasoning.
  120. ReasoningTokens int64 `json:"reasoning_tokens"`
  121. // When using Predicted Outputs, the number of tokens in the prediction that did
  122. // not appear in the completion. However, like reasoning tokens, these tokens are
  123. // still counted in the total completion tokens for purposes of billing, output,
  124. // and context window limits.
  125. RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
  126. }
  127. CompletionUsagePromptTokensDetails {
  128. // Audio input tokens present in the prompt.
  129. AudioTokens int64 `json:"audio_tokens"`
  130. // Cached tokens present in the prompt.
  131. CachedTokens int64 `json:"cached_tokens"`
  132. }
  133. CompletionUsage {
  134. // Number of tokens in the generated completion.
  135. CompletionTokens int64 `json:"completion_tokens,required"`
  136. // Number of tokens in the prompt.
  137. PromptTokens int64 `json:"prompt_tokens,required"`
  138. // Total number of tokens used in the request (prompt + completion).
  139. TotalTokens int64 `json:"total_tokens,required"`
  140. // Breakdown of tokens used in a completion.
  141. CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
  142. // Breakdown of tokens used in the prompt.
  143. PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
  144. }
  145. )
  146. @server(
  147. group: chat
  148. prefix: /v1
  149. )
  150. service Wechat {
  151. @handler getAuth
  152. get /chat/getauth () returns (BaseMsgResp)
  153. }
  154. @server(
  155. group: chat
  156. prefix: /v1
  157. //jwt: Auth
  158. middleware: OpenAuthority
  159. )
  160. service Wechat {
  161. @handler chatCompletions
  162. post /chat/completions (CompApiReq) returns (CompOpenApiResp)
  163. }