neurobagel / query-tool-ai, build 10554507384

26 Aug 2024 06:07AM UTC coverage: 93.846% (+0.8%) from 93.059%
Triggered by a push via GitHub (web-flow).

[MNT] Modifications to Code to Ensure Frontend Compatibility (#40)

* Modifications to Code to Ensure Frontend Compatibility
* Minor change
* Added some tests
* Minor change

27 of 32 new or added lines in 6 files covered (84.38%).
1 existing line in 1 file now uncovered.
366 of 390 relevant lines covered (93.85%).
1.86 hits per line.
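(The percentages are plain ratios: 27/32 = 84.38% for the new lines, and 366/390 = 93.85%, the rounded form of the overall 93.846% figure above.)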

Source File: /app/llm_processing/extractions.py (91.11% covered)

from langchain_community.chat_models import ChatOllama
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from typing import Optional
from tenacity import retry, stop_after_attempt, RetryError


class Parameters(BaseModel):
    """
    Parameters for information extraction.

    Attributes:
        max_age (Optional[str]): Maximum age, if specified.
        min_age (Optional[str]): Minimum age, if specified.
        sex (Optional[str]): Sex.
        diagnosis (Optional[str]): Diagnosis.
        is_control (Optional[bool]): Whether subjects are healthy controls.
        min_num_imaging_sessions (Optional[str]): Minimum number of imaging sessions.
        min_num_phenotypic_sessions (Optional[str]): Minimum number of phenotypic sessions.
        assessment (Optional[str]): Assessment tool used or assessed with.
        image_modal (Optional[str]): Imaging modality.
    """

    max_age: Optional[str] = Field(
        description="maximum age (upper age limit) if specified", default=None
    )
    min_age: Optional[str] = Field(
        description="minimum age (lower age limit) if specified", default=None
    )
    sex: Optional[str] = Field(
        description="sex, only accepts 'male' or 'female' or 'other'",
        default=None,
        examples=["male", "female", "other"],
    )
    diagnosis: Optional[str] = Field(description="diagnosis", default=None)
    is_control: Optional[bool] = Field(
        description="healthy control subjects", default=None
    )
    min_num_imaging_sessions: Optional[str] = Field(
        description="minimum number of imaging sessions", default=None
    )
    min_num_phenotypic_sessions: Optional[str] = Field(
        description="minimum number of phenotypic sessions", default=None
    )
    assessment: Optional[str] = Field(
        description="assessment tool used or assessed with", default=None
    )
    image_modal: Optional[str] = Field(
        description="imaging modality", default=None
    )


@retry(stop=stop_after_attempt(3))
def extract_information(context: str) -> dict:
    """
    Extract information using a LangChain pipeline with a retry mechanism.

    Args:
        context (str): Input context from which information is to be extracted.

    Returns:
        dict: Extracted information structured according to the Parameters
            schema (an empty dict if the context is empty).

    Raises:
        tenacity.RetryError: If all three attempts fail.
    """
    # Return an empty dictionary if the context is an empty string
    if context == "":
        return {}

    # Initialize the LLM (ChatOllama)
    llm = ChatOllama(model="mistral")

    # Initialize the JSON output parser
    parser = JsonOutputParser(pydantic_object=Parameters)

    # Define the prompt template
    prompt = PromptTemplate(
        template="Just extract the information as specified.\n{format_instructions}\n{context}\nIf not mentioned, put null.",
        input_variables=["context"],
        partial_variables={
            "format_instructions": parser.get_format_instructions()
        },
    )

    # Create the extraction chain and invoke it with the provided context.
    # Any exception raised here propagates and triggers a retry, up to the
    # three attempts allowed by the decorator.
    chain = prompt | llm | parser
    response = chain.invoke({"context": context})

    # Ensure the order of keys matches the Parameters model
    ordered_response = {
        field: response.get(field, None)
        for field in Parameters.__fields__.keys()
    }

    # Drop keys whose value is None or the string "None", and normalize the
    # rest: ages to float, session counts to int, and the free-text fields
    # (diagnosis, assessment, image_modal, sex) to lowercase.
    filtered_ordered_response = {
        k: (
            float(v)
            if k in ["min_age", "max_age"] and v is not None
            else (
                int(v)
                if k
                in [
                    "min_num_phenotypic_sessions",
                    "min_num_imaging_sessions",
                ]
                and v is not None
                else (
                    v.lower()
                    if isinstance(v, str)
                    and k
                    in ["diagnosis", "assessment", "image_modal", "sex"]
                    else v
                )
            )
        )
        for k, v in ordered_response.items()
        if v is not None and v != "None"
    }

    # A query that names a diagnosis is not about healthy controls unless the
    # model says otherwise. (Per the report, this assignment is the one
    # existing line left uncovered in this build.)
    if "diagnosis" in filtered_ordered_response:
        if "is_control" not in filtered_ordered_response:
            filtered_ordered_response["is_control"] = False

    # Return the filtered, ordered information as a dictionary
    return filtered_ordered_response


def main():
    while True:
        user_query = input("Enter user query (or 'exit' to quit): ")
        if user_query.lower() == "exit":
            break

        try:
            response = extract_information(user_query)
        except RetryError:
            # All three extraction attempts failed; ask for a clearer query.
            print(
                "Sorry, the model failed to understand the query. Could you be more precise?"
            )
            continue

        print("Model response:", response)
        print("")


if __name__ == "__main__":
    main()