Skip to content

Commit

Permalink
feat: update and optimize tokenizer performance (#191)
Browse files Browse the repository at this point in the history
Co-Authored-By: Minghan Zhang <[email protected]>
  • Loading branch information
Sh1n3zZ and zmh-program committed Jun 21, 2024
1 parent 2024302 commit 4c3843b
Show file tree
Hide file tree
Showing 3 changed files with 24 additions and 22 deletions.
5 changes: 3 additions & 2 deletions globals/logger.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@ package globals

import (
"fmt"
"strings"

"github.com/natefinch/lumberjack"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"strings"
)

const DefaultLoggerFile = "chatnio.log"
Expand All @@ -25,7 +26,7 @@ func (l *AppLogger) Format(entry *logrus.Entry) ([]byte, error) {
)

if !viper.GetBool("log.ignore_console") {
fmt.Println(data)
fmt.Print(data)
}

return []byte(data), nil
Expand Down
11 changes: 3 additions & 8 deletions utils/buffer.go
Original file line number Diff line number Diff line change
Expand Up @@ -197,11 +197,6 @@ func (b *Buffer) IsFunctionCalling() bool {
return b.FunctionCall != nil || b.ToolCalls != nil
}

// WriteBytes appends the given byte slice to the buffer as text and
// returns the same slice unchanged, allowing call-site chaining.
func (b *Buffer) WriteBytes(data []byte) []byte {
	text := string(data)
	b.Write(text)
	return data
}

// IsEmpty reports whether the buffer holds no written content and has
// no pending function or tool call attached.
func (b *Buffer) IsEmpty() bool {
	if b.Cursor != 0 {
		return false
	}
	return !b.IsFunctionCalling()
}
Expand Down Expand Up @@ -237,12 +232,12 @@ func (b *Buffer) SetInputTokens(tokens int) {
b.InputTokens = tokens
}

func (b *Buffer) CountInputToken() int {
return b.InputTokens
func (b *Buffer) CountOutputToken() int {
return b.ReadTimes() * GetWeightByModel(b.Model)
}

func (b *Buffer) CountOutputToken() int {

Check failure on line 239 in utils/buffer.go

View workflow job for this annotation

GitHub Actions / release (18.x)

method Buffer.CountOutputToken already declared at utils/buffer.go:235:18

Check failure on line 239 in utils/buffer.go

View workflow job for this annotation

GitHub Actions / release (18.x)

method Buffer.CountOutputToken already declared at utils/buffer.go:235:18
return b.ReadTimes() * GetWeightByModel(b.Model)
return b.CountInputToken() + b.CountOutputToken()

Check failure on line 240 in utils/buffer.go

View workflow job for this annotation

GitHub Actions / release (18.x)

b.CountInputToken undefined (type *Buffer has no field or method CountInputToken)

Check failure on line 240 in utils/buffer.go

View workflow job for this annotation

GitHub Actions / release (18.x)

b.CountInputToken undefined (type *Buffer has no field or method CountInputToken)
}

func (b *Buffer) CountToken() int {
Expand Down
30 changes: 18 additions & 12 deletions utils/tokenizer.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,9 @@ package utils
import (
"chat/globals"
"fmt"
"github.com/pkoukk/tiktoken-go"
"strings"

"github.com/pkoukk/tiktoken-go"
)

// Using https://github.com/pkoukk/tiktoken-go
Expand Down Expand Up @@ -45,9 +46,10 @@ func GetWeightByModel(model string) int {
}
}
}
func NumTokensFromMessages(messages []globals.Message, model string) (tokens int) {
func NumTokensFromMessages(messages []globals.Message, model string, responseType bool) (tokens int) {
tokensPerMessage := GetWeightByModel(model)
tkm, err := tiktoken.EncodingForModel(model)

if err != nil {
// the method above was deprecated, use the recall method instead
// can not encode messages, use length of messages as a proxy for number of tokens
Expand All @@ -59,25 +61,29 @@ func NumTokensFromMessages(messages []globals.Message, model string) (tokens int
if globals.DebugMode {
globals.Debug(fmt.Sprintf("[tiktoken] error encoding messages: %s (model: %s), using default model instead", err, model))
}
return NumTokensFromMessages(messages, globals.GPT3Turbo0613)
return NumTokensFromMessages(messages, globals.GPT3Turbo0613, responseType)
}

for _, message := range messages {
tokens +=
len(tkm.Encode(message.Content, nil, nil)) +
len(tkm.Encode(message.Role, nil, nil)) +
tokensPerMessage
tokens += len(tkm.Encode(message.Content, nil, nil))

if !responseType {
tokens += len(tkm.Encode(message.Role, nil, nil)) + tokensPerMessage
}
}

if !responseType {
tokens += 3 // every reply is primed with <|start|>assistant<|message|>
}
tokens += 3 // every reply is primed with <|start|>assistant<|message|>

if globals.DebugMode {
globals.Debug(fmt.Sprintf("[tiktoken] num tokens from messages: %d (tokens per message: %d, model: %s)", tokens, tokensPerMessage, model))
}
return tokens
}

func CountTokenPrice(messages []globals.Message, model string) int {
return NumTokensFromMessages(messages, model) * GetWeightByModel(model)
func NumTokensFromResponse(response string, model string) int {
return NumTokensFromMessages([]globals.Message{{Content: response}}, model, true)
}

func CountInputQuota(charge Charge, token int) float32 {
Expand All @@ -88,10 +94,10 @@ func CountInputQuota(charge Charge, token int) float32 {
return 0
}

func CountOutputToken(charge Charge, model string, token int) float32 {
func CountOutputToken(charge Charge, token int) float32 {
switch charge.GetType() {
case globals.TokenBilling:
return float32(token*GetWeightByModel(model)) / 1000 * charge.GetOutput()
return float32(token) / 1000 * charge.GetOutput()
case globals.TimesBilling:
return charge.GetOutput()
default:
Expand Down

0 comments on commit 4c3843b

Please sign in to comment.