• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

kshard / thinker / 19148048311

06 Nov 2025 07:53PM UTC coverage: 63.636% (+9.6%) from 54.037%
19148048311

Pull #48

github

fogfish
increase test coverage
Pull Request #48: Use MCP Servers as an integration point instead of commands

116 of 148 new or added lines in 4 files covered. (78.38%)

5 existing lines in 1 file now uncovered.

322 of 506 relevant lines covered (63.64%)

0.69 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/agent/worker/jsonify.go
1
//
2
// Copyright (C) 2025 Dmitry Kolesnikov
3
//
4
// This file may be modified and distributed under the terms
5
// of the MIT license.  See the LICENSE file for details.
6
// https://github.com/kshard/thinker
7
//
8

9
package worker
10

11
import (
12
        "github.com/kshard/chatter"
13
        "github.com/kshard/thinker"
14
        "github.com/kshard/thinker/agent"
15
        "github.com/kshard/thinker/codec"
16
        "github.com/kshard/thinker/memory"
17
        "github.com/kshard/thinker/prompt/jsonify"
18
        "github.com/kshard/thinker/reasoner"
19
)
20

21
// Jsonify implements request/response to LLMs, forcing the response to
// be a JSON array of strings. The agent behavior itself is provided by
// the embedded Automata; Jsonify supplies its codec and reasoner.
type Jsonify[A any] struct {
	*agent.Automata[A, []string]
	// encoder transforms the application input A into an LLM prompt.
	encoder   thinker.Encoder[A]
	// validator vets the decoded []string; a non-nil error triggers
	// a refinement round (see decode / deduct).
	validator func([]string) error
}
27

28
func NewJsonify[A any](
29
        llm chatter.Chatter,
30
        attempts int,
31
        encoder thinker.Encoder[A],
32
        validator func([]string) error,
33
) *Jsonify[A] {
×
34
        w := &Jsonify[A]{encoder: encoder, validator: validator}
×
35
        w.Automata = agent.NewAutomata(llm,
×
36

×
37
                // Configures memory for the agent. Typically, memory retains all of
×
38
                // the agent's observations. Here, we use an infinite stream memory,
×
39
                // recalling all observations.
×
40
                memory.NewStream(memory.INFINITE, `
×
41
                        You are automomous agent who perform required tasks, providing results in JSON.
×
42
                `),
×
43

×
44
                // Configures the encoder to transform input of type A into a `chatter.Prompt`.
×
45
                // Here, it is defined by application
×
46
                codec.FromEncoder(w.encode),
×
47

×
48
                // Configure the decoder to transform output of LLM into type B.
×
49
                // Here, we use the identity decoder that returns LLMs output as-is.
×
50
                codec.FromDecoder(w.decode),
×
51

×
52
                // Configures the reasoner, which determines the agent's next actions and prompts.
×
53
                // Here, we use a sequence of command reasoner, it assumes that input prompt is
×
54
                // the workflow based on command. LLM guided to execute entire workflow.
×
55
                reasoner.NewEpoch(attempts, reasoner.From(w.deduct)),
×
56
        )
×
57

×
58
        return w
×
59
}
×
60

61
func (w *Jsonify[A]) encode(in A) (chatter.Message, error) {
×
62
        prompt, err := w.encoder.Encode(in)
×
63
        if err != nil {
×
64
                return nil, err
×
65
        }
×
66

67
        switch v := prompt.(type) {
×
68
        case *chatter.Prompt:
×
NEW
69
                jsonify.Strings.Harden(v, nil)
×
70
        }
71

72
        return prompt, nil
×
73
}
74

75
func (w *Jsonify[A]) decode(reply *chatter.Reply) (float64, []string, error) {
×
76
        var seq []string
×
NEW
77
        if err := jsonify.Strings.Decode(reply, nil, &seq); err != nil {
×
78
                return 0.0, nil, err
×
79
        }
×
80

81
        if err := w.validator(seq); err != nil {
×
82
                return 0.1, nil, err
×
83
        }
×
84

85
        return 1.0, seq, nil
×
86
}
87

88
func (w *Jsonify[A]) deduct(state thinker.State[[]string]) (thinker.Phase, chatter.Message, error) {
×
89
        // Provide feedback to LLM if there are no confidence about the results
×
90
        if state.Feedback != nil && state.Confidence < 1.0 {
×
91
                var prompt chatter.Prompt
×
92
                prompt.WithTask("Refine the previous request using the feedback below.")
×
93
                prompt.With(state.Feedback)
×
94
                return thinker.AGENT_REFINE, &prompt, nil
×
95
        }
×
96

97
        // We have sufficient confidence, return results
98
        return thinker.AGENT_RETURN, nil, nil
×
99
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc