2024-02-29 01:08:18 +08:00
package relay
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"net/http"
"one-api/common"
2024-03-20 17:07:42 +08:00
"one-api/constant"
2024-02-29 01:08:18 +08:00
"one-api/dto"
"one-api/model"
relaycommon "one-api/relay/common"
relayconstant "one-api/relay/constant"
2025-02-20 16:41:46 +08:00
"one-api/relay/helper"
2024-02-29 01:08:18 +08:00
"one-api/service"
2024-12-22 17:24:29 +08:00
"one-api/setting"
2025-03-11 17:02:35 +08:00
"one-api/setting/model_setting"
2024-02-29 01:08:18 +08:00
"strings"
"time"
2025-03-08 21:55:50 +08:00
"github.com/bytedance/gopkg/util/gopool"
"github.com/shopspring/decimal"
2024-02-29 01:08:18 +08:00
"github.com/gin-gonic/gin"
)
func getAndValidateTextRequest ( c * gin . Context , relayInfo * relaycommon . RelayInfo ) ( * dto . GeneralOpenAIRequest , error ) {
textRequest := & dto . GeneralOpenAIRequest { }
err := common . UnmarshalBodyReusable ( c , textRequest )
if err != nil {
return nil , err
}
if relayInfo . RelayMode == relayconstant . RelayModeModerations && textRequest . Model == "" {
textRequest . Model = "text-moderation-latest"
}
if relayInfo . RelayMode == relayconstant . RelayModeEmbeddings && textRequest . Model == "" {
textRequest . Model = c . Param ( "model" )
}
2024-12-01 08:24:41 +08:00
if textRequest . MaxTokens > math . MaxInt32 / 2 {
2024-02-29 01:08:18 +08:00
return nil , errors . New ( "max_tokens is invalid" )
}
if textRequest . Model == "" {
return nil , errors . New ( "model is required" )
}
switch relayInfo . RelayMode {
case relayconstant . RelayModeCompletions :
if textRequest . Prompt == "" {
return nil , errors . New ( "field prompt is required" )
}
case relayconstant . RelayModeChatCompletions :
2024-12-01 08:24:41 +08:00
if len ( textRequest . Messages ) == 0 {
2024-02-29 01:08:18 +08:00
return nil , errors . New ( "field messages is required" )
}
case relayconstant . RelayModeEmbeddings :
case relayconstant . RelayModeModerations :
2024-12-01 08:24:41 +08:00
if textRequest . Input == nil || textRequest . Input == "" {
2024-02-29 01:08:18 +08:00
return nil , errors . New ( "field input is required" )
}
case relayconstant . RelayModeEdits :
if textRequest . Instruction == "" {
return nil , errors . New ( "field instruction is required" )
}
}
relayInfo . IsStream = textRequest . Stream
return textRequest , nil
}
2024-10-07 20:35:33 +08:00
// TextHelper is the main relay entry point for text-style requests (chat,
// completions, embeddings, moderations, edits). It validates the request,
// optionally screens it for sensitive words, estimates and pre-consumes quota,
// forwards the request through the channel adaptor, and finally settles quota
// from the upstream-reported usage. The named return value openaiErr is
// inspected by a deferred closure to refund pre-consumed quota on failure.
func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
	relayInfo := relaycommon.GenRelayInfo(c)

	// get & validate textRequest
	textRequest, err := getAndValidateTextRequest(c, relayInfo)
	if err != nil {
		common.LogError(c, fmt.Sprintf("getAndValidateTextRequest failed: %s", err.Error()))
		return service.OpenAIErrorWrapperLocal(err, "invalid_text_request", http.StatusBadRequest)
	}

	// Screen the request content for configured sensitive words; a non-nil
	// error here means the request is rejected before reaching upstream.
	if setting.ShouldCheckPromptSensitive() {
		words, err := checkRequestSensitive(textRequest, relayInfo)
		if err != nil {
			common.LogWarn(c, fmt.Sprintf("user sensitive words detected: %s", strings.Join(words, ", ")))
			return service.OpenAIErrorWrapperLocal(err, "sensitive_words_detected", http.StatusBadRequest)
		}
	}

	// Apply model mapping (client-visible model name -> upstream model name).
	err = helper.ModelMappedHelper(c, relayInfo)
	if err != nil {
		return service.OpenAIErrorWrapperLocal(err, "model_mapped_error", http.StatusInternalServerError)
	}
	textRequest.Model = relayInfo.UpstreamModelName

	// Obtain promptTokens; if already present in the context (e.g. counted by
	// earlier middleware), reuse it instead of re-counting.
	var promptTokens int
	if value, exists := c.Get("prompt_tokens"); exists {
		promptTokens = value.(int)
		relayInfo.PromptTokens = promptTokens
	} else {
		promptTokens, err = getPromptTokens(textRequest, relayInfo)
		// count messages token error
		if err != nil {
			return service.OpenAIErrorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
		}
		c.Set("prompt_tokens", promptTokens)
	}

	priceData, err := helper.ModelPriceHelper(c, relayInfo, promptTokens, int(textRequest.MaxTokens))
	if err != nil {
		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
	}

	// pre-consume quota
	preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
	if openaiErr != nil {
		return openaiErr
	}
	// Refund the pre-consumed quota whenever the relay ends with an error;
	// this reads the named return value openaiErr at function exit.
	defer func() {
		if openaiErr != nil {
			returnPreConsumedQuota(c, relayInfo, userQuota, preConsumedQuota)
		}
	}()

	includeUsage := false
	// Determine whether the caller asked for usage in the stream response.
	if textRequest.StreamOptions != nil && textRequest.StreamOptions.IncludeUsage {
		includeUsage = true
	}

	// If StreamOptions is unsupported (or the request is not streaming),
	// strip StreamOptions before forwarding.
	if !relayInfo.SupportStreamOptions || !textRequest.Stream {
		textRequest.StreamOptions = nil
	} else {
		// StreamOptions is supported; if configured, force include_usage on
		// regardless of what the client sent.
		if constant.ForceStreamOption {
			textRequest.StreamOptions = &dto.StreamOptions{
				IncludeUsage: true,
			}
		}
	}

	if includeUsage {
		relayInfo.ShouldIncludeUsage = true
	}

	adaptor := GetAdaptor(relayInfo.ApiType)
	if adaptor == nil {
		return service.OpenAIErrorWrapperLocal(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
	}

	adaptor.Init(relayInfo)
	var requestBody io.Reader

	if model_setting.GetGlobalSettings().PassThroughRequestEnabled {
		// Pass-through mode: forward the raw client body without conversion.
		body, err := common.GetRequestBody(c)
		if err != nil {
			return service.OpenAIErrorWrapperLocal(err, "get_request_body_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(body)
	} else {
		// Convert the request to the upstream channel's wire format.
		convertedRequest, err := adaptor.ConvertRequest(c, relayInfo, textRequest)
		if err != nil {
			return service.OpenAIErrorWrapperLocal(err, "convert_request_failed", http.StatusInternalServerError)
		}
		jsonData, err := json.Marshal(convertedRequest)
		if err != nil {
			return service.OpenAIErrorWrapperLocal(err, "json_marshal_failed", http.StatusInternalServerError)
		}
		requestBody = bytes.NewBuffer(jsonData)
	}

	var httpResp *http.Response
	resp, err := adaptor.DoRequest(c, relayInfo, requestBody)
	if err != nil {
		return service.OpenAIErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
	}

	statusCodeMappingStr := c.GetString("status_code_mapping")
	// resp may be nil for adaptors that handle the response internally —
	// TODO confirm against adaptor implementations.
	if resp != nil {
		httpResp = resp.(*http.Response)
		// Upstream may answer a non-stream request with SSE; detect via header.
		relayInfo.IsStream = relayInfo.IsStream || strings.HasPrefix(httpResp.Header.Get("Content-Type"), "text/event-stream")
		if httpResp.StatusCode != http.StatusOK {
			openaiErr = service.RelayErrorHandler(httpResp)
			// reset status code
			service.ResetStatusCode(openaiErr, statusCodeMappingStr)
			return openaiErr
		}
	}

	usage, openaiErr := adaptor.DoResponse(c, httpResp, relayInfo)
	if openaiErr != nil {
		// reset status code
		service.ResetStatusCode(openaiErr, statusCodeMappingStr)
		return openaiErr
	}

	// gpt-4o-audio models are billed through the dedicated audio path.
	if strings.HasPrefix(relayInfo.OriginModelName, "gpt-4o-audio") {
		service.PostAudioConsumeQuota(c, relayInfo, usage.(*dto.Usage), preConsumedQuota, userQuota, priceData, "")
	} else {
		postConsumeQuota(c, relayInfo, usage.(*dto.Usage), preConsumedQuota, userQuota, priceData, "")
	}
	return nil
}
2024-05-23 23:59:55 +08:00
func getPromptTokens ( textRequest * dto . GeneralOpenAIRequest , info * relaycommon . RelayInfo ) ( int , error ) {
2024-02-29 01:08:18 +08:00
var promptTokens int
var err error
switch info . RelayMode {
case relayconstant . RelayModeChatCompletions :
2024-12-29 00:00:24 +08:00
promptTokens , err = service . CountTokenChatRequest ( info , * textRequest )
2024-02-29 01:08:18 +08:00
case relayconstant . RelayModeCompletions :
2024-05-23 23:59:55 +08:00
promptTokens , err = service . CountTokenInput ( textRequest . Prompt , textRequest . Model )
2024-02-29 01:08:18 +08:00
case relayconstant . RelayModeModerations :
2024-05-23 23:59:55 +08:00
promptTokens , err = service . CountTokenInput ( textRequest . Input , textRequest . Model )
2024-03-05 23:04:57 +08:00
case relayconstant . RelayModeEmbeddings :
2024-05-23 23:59:55 +08:00
promptTokens , err = service . CountTokenInput ( textRequest . Input , textRequest . Model )
2024-02-29 01:08:18 +08:00
default :
err = errors . New ( "unknown relay mode" )
promptTokens = 0
}
info . PromptTokens = promptTokens
2024-05-23 23:59:55 +08:00
return promptTokens , err
}
2025-02-21 16:57:30 +08:00
func checkRequestSensitive ( textRequest * dto . GeneralOpenAIRequest , info * relaycommon . RelayInfo ) ( [ ] string , error ) {
2024-05-23 23:59:55 +08:00
var err error
2025-02-21 16:57:30 +08:00
var words [ ] string
2024-05-23 23:59:55 +08:00
switch info . RelayMode {
case relayconstant . RelayModeChatCompletions :
2025-02-21 16:57:30 +08:00
words , err = service . CheckSensitiveMessages ( textRequest . Messages )
2024-05-23 23:59:55 +08:00
case relayconstant . RelayModeCompletions :
2025-02-21 16:57:30 +08:00
words , err = service . CheckSensitiveInput ( textRequest . Prompt )
2024-05-23 23:59:55 +08:00
case relayconstant . RelayModeModerations :
2025-02-21 16:57:30 +08:00
words , err = service . CheckSensitiveInput ( textRequest . Input )
2024-05-23 23:59:55 +08:00
case relayconstant . RelayModeEmbeddings :
2025-02-21 16:57:30 +08:00
words , err = service . CheckSensitiveInput ( textRequest . Input )
2024-05-23 23:59:55 +08:00
}
2025-02-21 16:57:30 +08:00
return words , err
2024-02-29 01:08:18 +08:00
}
// preConsumeQuota pre-deducts quota before the upstream call and returns the
// (possibly reduced-to-zero) pre-consumed amount, the user's remaining quota,
// and any error. When both the user's quota and the token's quota exceed 100x
// the estimated cost, pre-consumption is skipped ("trusted") and settlement
// happens entirely after the response.
func preConsumeQuota(c *gin.Context, preConsumedQuota int, relayInfo *relaycommon.RelayInfo) (int, int, *dto.OpenAIErrorWithStatusCode) {
	userQuota, err := model.GetUserQuota(relayInfo.UserId, false)
	if err != nil {
		return 0, 0, service.OpenAIErrorWrapperLocal(err, "get_user_quota_failed", http.StatusInternalServerError)
	}
	if userQuota <= 0 {
		return 0, 0, service.OpenAIErrorWrapperLocal(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
	}
	// Not enough remaining quota to cover the estimated pre-consumption.
	if userQuota-preConsumedQuota < 0 {
		return 0, 0, service.OpenAIErrorWrapperLocal(fmt.Errorf("chat pre-consumed quota failed, user quota: %s, need quota: %s", common.FormatQuota(userQuota), common.FormatQuota(preConsumedQuota)), "insufficient_user_quota", http.StatusForbidden)
	}
	relayInfo.UserQuota = userQuota
	if userQuota > 100*preConsumedQuota {
		// User quota is ample; next check whether the token quota is also ample.
		if !relayInfo.TokenUnlimited {
			// Limited token: check the token's remaining quota.
			tokenQuota := c.GetInt("token_quota")
			if tokenQuota > 100*preConsumedQuota {
				// Token quota is ample too — trust and skip pre-consumption.
				preConsumedQuota = 0
				common.LogInfo(c, fmt.Sprintf("user %d quota %s and token %d quota %d are enough, trusted and no need to pre-consume", relayInfo.UserId, common.FormatQuota(userQuota), relayInfo.TokenId, tokenQuota))
			}
		} else {
			// in this case, we do not pre-consume quota
			// because the user has enough quota
			preConsumedQuota = 0
			common.LogInfo(c, fmt.Sprintf("user %d with unlimited token has enough quota %s, trusted and no need to pre-consume", relayInfo.UserId, common.FormatQuota(userQuota)))
		}
	}

	if preConsumedQuota > 0 {
		// Deduct from the token first, then from the user's balance.
		// NOTE(review): these two writes are not transactional — a failure of
		// DecreaseUserQuota leaves the token already debited; confirm whether
		// upstream compensates.
		err := service.PreConsumeTokenQuota(relayInfo, preConsumedQuota)
		if err != nil {
			return 0, 0, service.OpenAIErrorWrapperLocal(err, "pre_consume_token_quota_failed", http.StatusForbidden)
		}
		err = model.DecreaseUserQuota(relayInfo.UserId, preConsumedQuota)
		if err != nil {
			return 0, 0, service.OpenAIErrorWrapperLocal(err, "decrease_user_quota_failed", http.StatusInternalServerError)
		}
	}
	return preConsumedQuota, userQuota, nil
}
2024-09-26 00:59:09 +08:00
func returnPreConsumedQuota ( c * gin . Context , relayInfo * relaycommon . RelayInfo , userQuota int , preConsumedQuota int ) {
2024-03-06 17:41:55 +08:00
if preConsumedQuota != 0 {
2025-02-19 18:38:29 +08:00
gopool . Go ( func ( ) {
2024-12-01 08:24:41 +08:00
relayInfoCopy := * relayInfo
2025-02-18 14:54:21 +08:00
err := service . PostConsumeQuota ( & relayInfoCopy , - preConsumedQuota , 0 , false )
2024-03-06 17:41:55 +08:00
if err != nil {
common . SysError ( "error return pre-consumed quota: " + err . Error ( ) )
}
2025-02-19 18:38:29 +08:00
} )
2024-03-06 17:41:55 +08:00
}
}
2025-02-20 16:41:46 +08:00
// postConsumeQuota settles the final quota for a completed relay request:
// it computes the actual cost from the upstream-reported usage (with decimal
// arithmetic to avoid float drift), applies the delta against the earlier
// pre-consumption, updates user/channel counters, and records the consume log.
// It is called even when usage is zero so the pre-consumed quota can be
// reconciled and the failure logged.
func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
	usage *dto.Usage, preConsumedQuota int, userQuota int, priceData helper.PriceData, extraContent string) {
	// A nil usage usually means the request errored out; synthesize a usage
	// record covering only the prompt tokens.
	if usage == nil {
		usage = &dto.Usage{
			PromptTokens:     relayInfo.PromptTokens,
			CompletionTokens: 0,
			TotalTokens:      relayInfo.PromptTokens,
		}
		extraContent += "(可能是请求出错)"
	}
	useTimeSeconds := time.Now().Unix() - relayInfo.StartTime.Unix()
	promptTokens := usage.PromptTokens
	// Cached prompt tokens are billed at a discounted cacheRatio below.
	cacheTokens := usage.PromptTokensDetails.CachedTokens
	completionTokens := usage.CompletionTokens
	// Billing logs use the client-facing model name, not the mapped upstream one.
	modelName := relayInfo.OriginModelName
	tokenName := ctx.GetString("token_name")
	completionRatio := priceData.CompletionRatio
	cacheRatio := priceData.CacheRatio
	modelRatio := priceData.ModelRatio
	groupRatio := priceData.GroupRatio
	modelPrice := priceData.ModelPrice

	// Convert values to decimal for precise calculation
	dPromptTokens := decimal.NewFromInt(int64(promptTokens))
	dCacheTokens := decimal.NewFromInt(int64(cacheTokens))
	dCompletionTokens := decimal.NewFromInt(int64(completionTokens))
	dCompletionRatio := decimal.NewFromFloat(completionRatio)
	dCacheRatio := decimal.NewFromFloat(cacheRatio)
	dModelRatio := decimal.NewFromFloat(modelRatio)
	dGroupRatio := decimal.NewFromFloat(groupRatio)
	dModelPrice := decimal.NewFromFloat(modelPrice)
	dQuotaPerUnit := decimal.NewFromFloat(common.QuotaPerUnit)

	ratio := dModelRatio.Mul(dGroupRatio)

	var quotaCalculateDecimal decimal.Decimal
	if !priceData.UsePrice {
		// Per-token billing:
		// quota = ((prompt - cached) + cached*cacheRatio + completion*completionRatio) * modelRatio * groupRatio
		nonCachedTokens := dPromptTokens.Sub(dCacheTokens)
		cachedTokensWithRatio := dCacheTokens.Mul(dCacheRatio)
		promptQuota := nonCachedTokens.Add(cachedTokensWithRatio)
		completionQuota := dCompletionTokens.Mul(dCompletionRatio)
		quotaCalculateDecimal = promptQuota.Add(completionQuota).Mul(ratio)
		// Charge at least 1 whenever the ratio is non-zero, so a tiny request
		// is never free.
		if !ratio.IsZero() && quotaCalculateDecimal.LessThanOrEqual(decimal.Zero) {
			quotaCalculateDecimal = decimal.NewFromInt(1)
		}
	} else {
		// Fixed per-request pricing: quota = modelPrice * QuotaPerUnit * groupRatio.
		quotaCalculateDecimal = dModelPrice.Mul(dQuotaPerUnit).Mul(dGroupRatio)
	}
	quota := int(quotaCalculateDecimal.Round(0).IntPart())
	totalTokens := promptTokens + completionTokens

	var logContent string
	if !priceData.UsePrice {
		logContent = fmt.Sprintf("模型倍率 %.2f,补全倍率 %.2f,分组倍率 %.2f", modelRatio, completionRatio, groupRatio)
	} else {
		logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f", modelPrice, groupRatio)
	}
	// record all the consume log even if quota is 0
	if totalTokens == 0 {
		// in this case, must be some error happened
		// we cannot just return, because we may have to return the pre-consumed quota
		quota = 0
		logContent += fmt.Sprintf("(可能是上游超时)")
		common.LogError(ctx, fmt.Sprintf("total tokens is 0, cannot consume quota, userId %d, channelId %d, "+
			"tokenId %d, model %s, pre-consumed quota %d", relayInfo.UserId, relayInfo.ChannelId, relayInfo.TokenId, modelName, preConsumedQuota))
	} else {
		// Settle the difference between the actual cost and what was
		// pre-consumed (may be negative, i.e. a refund).
		quotaDelta := quota - preConsumedQuota
		if quotaDelta != 0 {
			err := service.PostConsumeQuota(relayInfo, quotaDelta, preConsumedQuota, true)
			if err != nil {
				common.LogError(ctx, "error consuming token remain quota: "+err.Error())
			}
		}
		model.UpdateUserUsedQuotaAndRequestCount(relayInfo.UserId, quota)
		model.UpdateChannelUsedQuota(relayInfo.ChannelId, quota)
	}

	// Collapse per-user gizmo model names into a wildcard for the log, while
	// keeping the concrete name in the log content.
	logModel := modelName
	if strings.HasPrefix(logModel, "gpt-4-gizmo") {
		logModel = "gpt-4-gizmo-*"
		logContent += fmt.Sprintf(",模型 %s", modelName)
	}
	if strings.HasPrefix(logModel, "gpt-4o-gizmo") {
		logModel = "gpt-4o-gizmo-*"
		logContent += fmt.Sprintf(",模型 %s", modelName)
	}
	if extraContent != "" {
		logContent += ", " + extraContent
	}
	other := service.GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, cacheTokens, cacheRatio, modelPrice)
	model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel,
		tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, relayInfo.Group, other)
}