• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

umputun / tg-spam / 15672803049

16 Jun 2025 05:42AM UTC coverage: 79.352% (-2.1%) from 81.499%
15672803049

Pull #294

github

umputun
Add CLI override functionality for auth credentials in database mode

- Created applyCLIOverrides function to handle selective CLI parameter overrides
- Currently handles --server.auth and --server.auth-hash overrides
- Only applies overrides when values differ from defaults
- Auth hash takes precedence over password when both are provided
- Added comprehensive unit tests covering all override scenarios
- Function is extensible for future CLI override needs (documented in comments)

This fixes the issue where users couldn't change auth credentials when using
database configuration mode (--confdb), as the save-config command would
overwrite all settings rather than just the auth credentials.
Pull Request #294: Implement database configuration support

891 of 1298 new or added lines in 9 files covered. (68.64%)

174 existing lines in 4 files now uncovered.

5734 of 7226 relevant lines covered (79.35%)

57.45 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

86.02
/lib/tgspam/openai.go
1
package tgspam
2

3
import (
4
        "context"
5
        "encoding/json"
6
        "fmt"
7
        "strings"
8

9
        tokenizer "github.com/sandwich-go/gpt3-encoder"
10
        "github.com/sashabaranov/go-openai"
11

12
        "github.com/umputun/tg-spam/lib/spamcheck"
13
)
14

15
//go:generate moq --out mocks/openai_client.go --pkg mocks --with-resets --skip-ensure . openAIClient:OpenAIClientMock
16

17
// openAIChecker is a wrapper for OpenAI API to check if a text is spam
type openAIChecker struct {
	client openAIClient // chat-completion client; nil makes check return a zero response without calling the API
	params OpenAIConfig // effective configuration; zero fields are filled with defaults by newOpenAIChecker
}
22

23
// OpenAIConfig contains parameters for openAIChecker
type OpenAIConfig struct {
	// https://platform.openai.com/docs/api-reference/chat/create#chat/create-max_tokens
	MaxTokensResponse int // hard limit for the number of tokens in the response
	// the OpenAI has a limit for the number of tokens in the request + response (4097)
	MaxTokensRequest  int      // max request length in tokens
	MaxSymbolsRequest int      // fallback: Max request length in symbols, if tokenizer was failed
	Model             string   // OpenAI model name; empty falls back to "gpt-4o-mini" in newOpenAIChecker
	SystemPrompt      string   // system prompt sent with every request; empty falls back to defaultPrompt
	CustomPrompts     []string // additional custom prompts that can be selected
	ReasoningEffort   string   // level of reasoning effort to use; NOTE(review): not referenced in this file — confirm it is applied when building the request
	RetryCount        int      // number of request attempts in check; values <= 0 are normalized to 1
}
36

37
// openAIClient is the minimal subset of the go-openai client used by
// openAIChecker; declared as an interface so it can be mocked in tests.
type openAIClient interface {
	CreateChatCompletion(context.Context, openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error)
}
40

41
// defaultPrompt instructs the model to reply with a strict JSON verdict
// ({"spam", "reason", "confidence"}) and to treat any supplied message
// history as extra context for the decision.
const defaultPrompt = `I'll give you a text from the messaging application and you will return me a json with three fields: {"spam": true/false, "reason":"why this is spam", "confidence":1-100}. Set spam:true only if confidence above 80. Return JSON only with no extra formatting!` + "\n" + `If history of previous messages provided, use them as extra context to make the decision.`
42

43
// openAIResponse is the JSON verdict the model is instructed (via the
// system prompt) to return for each checked message.
type openAIResponse struct {
	IsSpam     bool   `json:"spam"`       // true when the model classified the text as spam
	Reason     string `json:"reason"`     // model's short explanation of the verdict
	Confidence int    `json:"confidence"` // model's confidence, expected range 1-100 per the prompt
}
48

49
// newOpenAIChecker makes a bot for ChatGPT
50
func newOpenAIChecker(client openAIClient, params OpenAIConfig) *openAIChecker {
12✔
51
        if params.SystemPrompt == "" {
24✔
52
                params.SystemPrompt = defaultPrompt
12✔
53
        }
12✔
54
        if params.MaxTokensResponse == 0 {
23✔
55
                params.MaxTokensResponse = 1024
11✔
56
        }
11✔
57
        if params.MaxTokensRequest == 0 {
23✔
58
                params.MaxTokensRequest = 1024
11✔
59
        }
11✔
60
        if params.MaxSymbolsRequest == 0 {
23✔
61
                params.MaxSymbolsRequest = 8192
11✔
62
        }
11✔
63
        if params.Model == "" {
12✔
UNCOV
64
                params.Model = "gpt-4o-mini"
×
UNCOV
65
        }
×
66
        if params.RetryCount <= 0 {
24✔
67
                params.RetryCount = 1
12✔
68
        }
12✔
69
        return &openAIChecker{client: client, params: params}
12✔
70
}
71

72
// check checks if a text is spam using OpenAI API
73
func (o *openAIChecker) check(msg string, history []spamcheck.Request) (spam bool, cr spamcheck.Response) {
14✔
74
        if o.client == nil {
14✔
UNCOV
75
                return false, spamcheck.Response{}
×
UNCOV
76
        }
×
77

78
        // update the message with the history
79
        if len(history) > 0 {
17✔
80
                var hist []string
3✔
81
                for _, h := range history {
10✔
82
                        hist = append(hist, fmt.Sprintf("%q: %q", h.UserName, h.Msg))
7✔
83
                }
7✔
84
                msgWithHist := fmt.Sprintf("User message:\n%s\n\nHistory:\n%s\n", msg, strings.Join(hist, "\n"))
3✔
85
                msg = msgWithHist
3✔
86
        }
87

88
        // try to send a request several times if it fails
89
        var resp openAIResponse
14✔
90
        var err error
14✔
91
        for i := 0; i < o.params.RetryCount; i++ {
28✔
92
                if resp, err = o.sendRequest(msg); err == nil {
24✔
93
                        break
10✔
94
                }
95
        }
96
        if err != nil {
18✔
97
                return false, spamcheck.Response{
4✔
98
                        Spam: false, Name: "openai", Details: fmt.Sprintf("OpenAI error: %v", err), Error: err}
4✔
99
        }
4✔
100

101
        return resp.IsSpam, spamcheck.Response{Spam: resp.IsSpam, Name: "openai",
10✔
102
                Details: strings.TrimSuffix(resp.Reason, ".") + ", confidence: " + fmt.Sprintf("%d%%", resp.Confidence)}
10✔
103
}
104

105
func (o *openAIChecker) sendRequest(msg string) (response openAIResponse, err error) {
14✔
106
        // reduce the request size with tokenizer and fallback to default reducer if it fails.
14✔
107
        // the API supports 4097 tokens ~16000 characters (<=4 per token) for request + result together.
14✔
108
        // the response is limited to 1000 tokens, and OpenAI always reserved it for the result.
14✔
109
        // so the max length of the request should be 3000 tokens or ~12000 characters
14✔
110
        reduceRequest := func(text string) (result string) {
28✔
111
                // defaultReducer is a fallback if tokenizer fails
14✔
112
                defaultReducer := func(text string) (result string) {
14✔
UNCOV
113
                        if len(text) <= o.params.MaxSymbolsRequest {
×
UNCOV
114
                                return text
×
UNCOV
115
                        }
×
UNCOV
116
                        return text[:o.params.MaxSymbolsRequest]
×
117
                }
118

119
                encoder, tokErr := tokenizer.NewEncoder()
14✔
120
                if tokErr != nil {
14✔
UNCOV
121
                        return defaultReducer(text)
×
UNCOV
122
                }
×
123

124
                tokens, encErr := encoder.Encode(text)
14✔
125
                if encErr != nil {
14✔
UNCOV
126
                        return defaultReducer(text)
×
UNCOV
127
                }
×
128

129
                if len(tokens) <= o.params.MaxTokensRequest {
28✔
130
                        return text
14✔
131
                }
14✔
132

UNCOV
133
                return encoder.Decode(tokens[:o.params.MaxTokensRequest])
×
134
        }
135

136
        r := reduceRequest(msg)
14✔
137

14✔
138
        data := []openai.ChatCompletionMessage{
14✔
139
                {Role: openai.ChatMessageRoleSystem, Content: o.params.SystemPrompt},
14✔
140
                {Role: openai.ChatMessageRoleUser, Content: r},
14✔
141
        }
14✔
142

14✔
143
        resp, err := o.client.CreateChatCompletion(
14✔
144
                context.Background(),
14✔
145
                openai.ChatCompletionRequest{
14✔
146
                        Model:          o.params.Model,
14✔
147
                        MaxTokens:      o.params.MaxTokensResponse,
14✔
148
                        Messages:       data,
14✔
149
                        ResponseFormat: &openai.ChatCompletionResponseFormat{Type: "json_object"},
14✔
150
                },
14✔
151
        )
14✔
152

14✔
153
        if err != nil {
16✔
154
                return openAIResponse{}, fmt.Errorf("failed to create chat completion: %w", err)
2✔
155
        }
2✔
156

157
        // openAI platform supports returning multiple chat completion choices, but we use only the first one:
158
        // https://platform.openai.com/docs/api-reference/chat/create#chat/create-n
159
        if len(resp.Choices) == 0 {
13✔
160
                return openAIResponse{}, fmt.Errorf("no choices in response")
1✔
161
        }
1✔
162

163
        if err := json.Unmarshal([]byte(resp.Choices[0].Message.Content), &response); err != nil {
12✔
164
                return openAIResponse{}, fmt.Errorf("can't unmarshal response: %s - %w", resp.Choices[0].Message.Content, err)
1✔
165
        }
1✔
166

167
        return response, nil
10✔
168
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc