import "../base.api"

type (

    // API request types

    // CompApiReq is the full completions request: workflow control fields
    // plus the standard and FastGPT-specific completion fields.
    CompApiReq {
        CompCtlReq
        StdCompApiReq
        FastGptSpecReq
    }
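    // Note: go-zero inlines embedded types, so the three embedded types above
    // are flattened into a single JSON object on the wire.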

    // FastGPT completions request
    FastGptApiReq {
        StdCompApiReq
        FastGptSpecReq
    }

    // Standard (OpenAI-style) completions request
    StdCompApiReq {
        // Model name, e.g. 'gpt-4o'
        Model string `json:"model,optional"`
        // Messages is the conversation message list
        Messages []StdCompMessage `json:"messages"`
        // Stream enables streaming output; defaults to false
        Stream bool `json:"stream,default=false"`
    }

    // Workflow control fields for the request
    CompCtlReq {
        // EventType is the event type; defaults to 'fastgpt'
        EventType string `json:"event_type,default=fastgpt"`
        // WorkId is the workflow ID
        WorkId string `json:"work_id"`
        // IsBatch selects asynchronous (true) or synchronous (false, the default) processing
        IsBatch bool `json:"is_batch,default=false"`
        // Callback is the callback URL for asynchronous results
        Callback string `json:"callback,optional"`
    }
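    // Presumably the result of an IsBatch=true request is delivered to the
    // Callback URL once the workflow finishes (an assumption; the callback
    // payload is not defined in this file).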

    // FastGPT-specific request fields
    FastGptSpecReq {
        // ChatId is the FastGPT chat session ID
        ChatId string `json:"chat_id,optional"`
        // ResponseChatItemId is the ID to assign to the response chat item
        ResponseChatItemId string `json:"response_chat_item_id,optional"`
        // Detail toggles returning detailed workflow run data; defaults to false
        Detail bool `json:"detail,default=false"`
        // Variables are workflow variables passed to FastGPT
        Variables map[string]string `json:"variables,optional"`
    }
	
    StdCompMessage {
        // Role of the message author, e.g. 'system', 'user' or 'assistant'
        Role string `json:"role"`
        // Content is the message text
        Content string `json:"content"`
    }
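    // Illustrative request body for POST /v1/chat/completions (all values
    // are hypothetical):
    //
    //   {
    //       "work_id": "wf-123",
    //       "model": "gpt-4o",
    //       "stream": false,
    //       "chat_id": "chat-001",
    //       "messages": [{"role": "user", "content": "Hello"}]
    //   }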

    // API response types

    // CompOpenApiResp is the completions response: standard OpenAI-style
    // fields plus FastGPT-specific fields.
    CompOpenApiResp {
        StdCompApiResp
        FastGptSpecResp
    }

    // Standard (OpenAI-style) completions response
    StdCompApiResp {
        // A unique identifier for the chat completion.
        ID string `json:"id"`
        // A list of chat completion choices. Can be more than one if `n` is
        // greater than 1.
        Choices []ChatCompletionChoice `json:"choices"`
        // The Unix timestamp (in seconds) of when the chat completion was created.
        Created int64 `json:"created"`
        // The model used for the chat completion.
        Model string `json:"model"`
        // The object type: `chat.completion`, or `chat.completion.chunk` for
        // streamed chunks.
        Object string `json:"object"`
        // The service tier used for processing the request.
        ServiceTier string `json:"service_tier,omitempty"`
        // This fingerprint represents the backend configuration that the model
        // runs with. Can be used in conjunction with the `seed` request
        // parameter to understand when backend changes have been made that
        // might impact determinism.
        SystemFingerprint string `json:"system_fingerprint"`
        // Usage statistics for the completion request.
        Usage CompletionUsage `json:"usage,omitempty"`
    }

    // FastGPT-specific response fields
    FastGptSpecResp {
        // ResponseData holds per-module run details when Detail is enabled
        ResponseData []map[string]string `json:"responseData,omitempty"`
        // NewVariables holds the workflow variables after the run
        NewVariables map[string]string `json:"newVariables,omitempty"`
    }
	
    ChatCompletionAudio {
        // Unique identifier for this audio response.
        ID string `json:"id"`
        //TODO
    }

    ChatCompletionMessage {
        // The contents of the message.
        Content string `json:"content"`
        // The contents of the reasoning message.
        ReasoningContent string `json:"reasoning_content,omitempty"`
        // The refusal message generated by the model.
        Refusal string `json:"refusal"`
        // The role of the author of this message.
        Role string `json:"role"`
        // If the audio output modality is requested, this object contains data
        // about the audio response from the model.
        // [Learn more](https://platform.openai.com/docs/guides/audio).
        Audio ChatCompletionAudio `json:"audio,omitempty"`
    }

    ChatCompletionChoice {
        // The reason the model stopped generating tokens. This will be `stop` if the model
        // hit a natural stop point or a provided stop sequence, `length` if the maximum
        // number of tokens specified in the request was reached, `content_filter` if
        // content was omitted due to a flag from our content filters, `tool_calls` if the
        // model called a tool, or `function_call` (deprecated) if the model called a
        // function.
        FinishReason string `json:"finish_reason"`
        // The index of the choice in the list of choices.
        Index int64 `json:"index"`
        // A chat completion message generated by the model (non-stream mode).
        Message ChatCompletionMessage `json:"message,omitempty"`
        // The message delta generated by the model in stream mode.
        Delta ChatCompletionMessage `json:"delta,omitempty"`
    }

    CompletionUsageCompletionTokensDetails {
        // When using Predicted Outputs, the number of tokens in the prediction that
        // appeared in the completion.
        AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
        // Audio output tokens generated by the model.
        AudioTokens int64 `json:"audio_tokens"`
        // Tokens generated by the model for reasoning.
        ReasoningTokens int64 `json:"reasoning_tokens"`
        // When using Predicted Outputs, the number of tokens in the prediction that did
        // not appear in the completion. However, like reasoning tokens, these tokens are
        // still counted in the total completion tokens for purposes of billing, output,
        // and context window limits.
        RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
    }

    CompletionUsagePromptTokensDetails {
        // Audio input tokens present in the prompt.
        AudioTokens int64 `json:"audio_tokens"`
        // Cached tokens present in the prompt.
        CachedTokens int64 `json:"cached_tokens"`
    }

    CompletionUsage {
        // Number of tokens in the generated completion.
        CompletionTokens int64 `json:"completion_tokens"`
        // Number of tokens in the prompt.
        PromptTokens int64 `json:"prompt_tokens"`
        // Total number of tokens used in the request (prompt + completion).
        TotalTokens int64 `json:"total_tokens"`
        // Breakdown of tokens used in a completion.
        CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
        // Breakdown of tokens used in the prompt.
        PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
    }
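    // Illustrative non-stream response body (all values are hypothetical):
    //
    //   {
    //       "id": "chatcmpl-abc123",
    //       "object": "chat.completion",
    //       "created": 1700000000,
    //       "model": "gpt-4o",
    //       "choices": [{
    //           "index": 0,
    //           "finish_reason": "stop",
    //           "message": {"role": "assistant", "content": "Hi there!"}
    //       }],
    //       "usage": {"prompt_tokens": 5, "completion_tokens": 3, "total_tokens": 8}
    //   }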

)

@server(
    group: chat
    prefix: /v1
)
service Wechat {
    @handler getAuth
    get /chat/getauth returns (BaseMsgResp)
}

@server(
    group: chat
    prefix: /v1
    //jwt: Auth
    middleware: OpenAuthority
)
service Wechat {
    @handler chatCompletions
    post /chat/completions (CompApiReq) returns (CompOpenApiResp)
}