• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

source-academy / backend / 9c2823cf899c9314fd4ccec9dd6c3b589d83e839

04 Dec 2025 05:47PM UTC coverage: 88.716% (-0.9%) from 89.621%
9c2823cf899c9314fd4ccec9dd6c3b589d83e839

push

github

web-flow
AI-powered marking (#1248)

* feat: v1 of AI-generated comments

* feat: added logging of inputs and outputs

* Update generate_ai_comments.ex

* feat: function to save outputs to database

* Format answers json before sending to LLM

* Add LLM Prompt to question params when submitting assessment xml file

* Add LLM Prompt to api response when grading view is open

* feat: added llm_prompt from qn to raw_prompt

* feat: enabling/disabling of LLM feature by course level

* feat: added llm_grading boolean field to course creation API

* feat: added api key storage in courses & edit api key/enable llm grading

* feat: encryption for llm_api_key

* feat: added final comment editing route

* feat: added logging of chosen comments

* fix: bugs when certain fields were missing

* feat: updated tests

* formatting

* fix: error handling when calling openai API

* fix: credo issues

* formatting

* Address some comments

* Fix formatting

* rm IO.inspect

* a

* Use case instead of if

* Streamlines generate_ai_comments to only send the selected question and its relevant info + use the correct llm_prompt

* Remove unnecessary field

* default: false for llm_grading

* Add proper linking between ai_comments table and submissions. Return it to submission retrieval as well

* Resolve some migration comments

* Add llm_model and llm_api_url to the DB + schema

* Moves api key, api url, llm model and course prompt to course level

* Add encryption_key to env

* Do not hardcode formatting instructions

* Add Assessment level prompts to the XML

* Return some additional info for composing of prompts

* Remove un-used 'save comments'

* Fix existing assessment tests

* Fix generate_ai_comments test cases

* Fix bug preventing avengers from generating ai comments

* Fix up tests + error msgs

* Formatting

* some mix credo suggestions

* format

* Fix credo issue

* bug fix + credo fixes

* Fix tests

* format

* Modify test.exs

* Update lib/cadet_web/controllers/gener... (continued)

118 of 174 new or added lines in 9 files covered. (67.82%)

1 existing line in 1 file now uncovered.

3758 of 4236 relevant lines covered (88.72%)

7103.93 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

69.05
/lib/cadet_web/controllers/generate_ai_comments.ex
1
defmodule CadetWeb.AICodeAnalysisController do
2
  use CadetWeb, :controller
3
  use PhoenixSwagger
4
  require HTTPoison
5
  require Logger
6

7
  alias Cadet.{Assessments, AIComments, Courses}
8
  alias CadetWeb.{AICodeAnalysisController, AICommentsHelpers}
9

10
  # Persists the prompt/response pair for an answer's AI comment run.
  # Creates a new row when none exists for the answer, otherwise overwrites
  # the latest existing row. `error` is nil on a successful LLM call.
  defp save_comment(answer_id, raw_prompt, answers_json, response, error \\ nil) do
    comment_attrs = %{
      answer_id: answer_id,
      raw_prompt: raw_prompt,
      answers_json: answers_json,
      response: response,
      error: error
    }

    case AIComments.get_latest_ai_comment(answer_id) do
      nil ->
        # First run for this answer: insert a fresh record.
        case AIComments.create_ai_comment(comment_attrs) do
          {:ok, comment} ->
            {:ok, comment}

          {:error, changeset} ->
            Logger.error("Failed to log AI comment to database: #{inspect(changeset.errors)}")
            {:error, changeset}
        end

      existing_comment ->
        # A record exists: lay the fresh attributes over the stored fields
        # (struct converted to a plain map first) and update in place.
        merged_attrs =
          existing_comment
          |> Map.from_struct()
          |> Map.merge(comment_attrs)

        case AIComments.update_ai_comment(existing_comment.id, merged_attrs) do
          {:ok, updated_comment} ->
            {:ok, updated_comment}

          {:error, :not_found} ->
            Logger.error("AI comment to update not found in database")
            {:error, :not_found}

          {:error, changeset} ->
            Logger.error("Failed to update AI comment in database: #{inspect(changeset.errors)}")
            {:error, changeset}
        end
    end
  end
52

53
  # Validates that every course-level LLM grading setting needed to call the
  # LLM is present and non-empty. Returns {:ok} (one-element tuple kept for
  # caller compatibility) or {:parameter_error, message}.
  #
  # The API key is intentionally unused here — it has already been validated
  # by decrypt_llm_api_key/1 before this is called. The parameter is kept
  # (underscored) so existing 4-argument call sites are unaffected; the
  # underscore fixes the "unused variable" compiler warning.
  defp check_llm_grading_parameters(
         _llm_api_key,
         llm_model,
         llm_api_url,
         llm_course_level_prompt
       ) do
    cond do
      is_nil(llm_model) or llm_model == "" ->
        {:parameter_error, "LLM model is not configured for this course"}

      is_nil(llm_api_url) or llm_api_url == "" ->
        {:parameter_error, "LLM API URL is not configured for this course"}

      is_nil(llm_course_level_prompt) or llm_course_level_prompt == "" ->
        {:parameter_error, "LLM course-level prompt is not configured for this course"}

      true ->
        {:ok}
    end
  end
68

69
  # Returns {:ok} when LLM grading is switched on for the course, otherwise a
  # :forbidden error tuple for the controller's `with` else-branch to render.
  defp ensure_llm_enabled(course) do
    case course.enable_llm_grading do
      # nil/false are the only falsy values in Elixir; everything else passes.
      disabled when disabled in [nil, false] ->
        {:error, {:forbidden, "LLM grading is not enabled for this course"}}

      _enabled ->
        {:ok}
    end
  end
76

77
  @doc """
78
  Fetches the question details and answers based on answer_id and generates AI-generated comments.
79
  """
80
  def generate_ai_comments(conn, %{
81
        "answer_id" => answer_id,
82
        "course_id" => course_id
83
      })
84
      when is_ecto_id(answer_id) do
85
    with {answer_id_parsed, ""} <- Integer.parse(answer_id),
4✔
86
         {:ok, course} <- Courses.get_course_config(course_id),
4✔
87
         {:ok} <- ensure_llm_enabled(course),
4✔
88
         {:ok, key} <- AICommentsHelpers.decrypt_llm_api_key(course.llm_api_key),
4✔
89
         {:ok} <-
4✔
90
           check_llm_grading_parameters(
91
             key,
92
             course.llm_model,
4✔
93
             course.llm_api_url,
4✔
94
             course.llm_course_level_prompt
4✔
95
           ),
96
         {:ok, answer} <- Assessments.get_answer(answer_id_parsed) do
4✔
97
      # Get head of answers (should only be one answer for given submission
98
      # and question since we filter to only 1 question)
99
      analyze_code(
3✔
100
        conn,
101
        %{
102
          answer: answer,
103
          api_key: key,
104
          llm_model: course.llm_model,
3✔
105
          llm_api_url: course.llm_api_url,
3✔
106
          course_prompt: course.llm_course_level_prompt,
3✔
107
          assessment_prompt: Assessments.get_llm_assessment_prompt(answer.question_id)
3✔
108
        }
109
      )
110
    else
111
      :error ->
112
        conn
113
        |> put_status(:bad_request)
NEW
114
        |> text("Invalid question ID format")
×
115

116
      {:decrypt_error, err} ->
117
        conn
118
        |> put_status(:internal_server_error)
NEW
119
        |> text("Failed to decrypt LLM API key")
×
120

121
      # Errors for check_llm_grading_parameters
122
      {:parameter_error, error_msg} ->
123
        conn
124
        |> put_status(:bad_request)
NEW
125
        |> text(error_msg)
×
126

127
      {:error, {status, message}} ->
128
        conn
129
        |> put_status(status)
130
        |> text(message)
1✔
131
    end
132
  end
133

134
  # Wraps the student's submitted code in a fenced markdown block; this
  # becomes the user-prompt content (after JSON encoding by the caller).
  defp format_student_answer(answer) do
    submitted_code = answer.answer["code"] || "N/A"
    "**Student Answer:**\n```\n" <> submitted_code <> "\n```\n"
  end
142

143
  # Builds the system prompt for the chat completion: course-level prompt,
  # then assessment-level prompt, then question-specific instructions,
  # question text, model solution, and this answer's autograding outcome.
  # nil prompts become "" for the two top sections and "N/A" for the
  # question fields, so every section header is always present.
  # NOTE(review): output whitespace is prompt-significant — the heredoc's
  # closing delimiter controls indentation stripping; do not reformat.
  defp format_system_prompt(course_prompt, assessment_prompt, answer) do
    "**Course Level Prompt:**\n\n" <>
      (course_prompt || "") <>
      "\n\n**Assessment Level Prompt:**" <>
      (assessment_prompt || "") <>
      "\n\n" <>
      """
      **Additional Instructions for this Question:**
      #{answer.question.question["llm_prompt"] || "N/A"}

      **Question:**
      ```
      #{answer.question.question["content"] || "N/A"}
      ```

      **Model Solution:**
      ```
      #{answer.question.question["solution"] || "N/A"}
      ```

      **Autograding Status:** #{answer.autograding_status || "N/A"}
      **Autograding Results:** #{format_autograding_results(answer.autograding_results)}

      The student answer will be given below as part of the User Prompt.
      """
  end
169

170
  @doc """
  Builds the `[system, user]` message list for the LLM chat completion.

  The system message carries the combined course/assessment/question prompt;
  the user message carries the student's formatted answer, JSON-encoded.
  """
  def create_final_messages(course_prompt, assessment_prompt, answer) do
    system_message = %{
      role: "system",
      content: format_system_prompt(course_prompt, assessment_prompt, answer)
    }

    user_message = %{
      role: "user",
      content: Jason.encode!(format_student_answer(answer))
    }

    [system_message, user_message]
  end
185

NEW
186
  # nil means the answer has no autograding results yet.
  defp format_autograding_results(nil), do: "N/A"

  # A list of autograder result maps is rendered as
  # "Error: <msg>, Type: <type>" entries joined with "; ".
  defp format_autograding_results(results) when is_list(results) do
    Enum.map_join(results, "; ", fn entry ->
      message = entry["errorMessage"] || "N/A"
      type = entry["errorType"] || "N/A"
      "Error: #{message}, Type: #{type}"
    end)
  end

  # Fallback for any other shape: show its inspected form.
  defp format_autograding_results(results), do: inspect(results)
×
195

196
  # Sends the composed prompts to the LLM endpoint and renders the generated
  # comments as JSON, logging every request/response pair via save_comment/5
  # (including malformed responses and transport errors).
  defp analyze_code(
         conn,
         %{
           answer: answer,
           api_key: api_key,
           llm_model: llm_model,
           llm_api_url: llm_api_url,
           course_prompt: course_prompt,
           assessment_prompt: assessment_prompt
         }
       ) do
    # Combine prompts if llm_prompt exists
    final_messages =
      create_final_messages(
        course_prompt,
        assessment_prompt,
        answer
      )

    # Destructure once, assertively, instead of repeating
    # Enum.at(final_messages, i).content in every branch below.
    [%{content: system_prompt}, %{content: user_prompt}] = final_messages

    input =
      [
        model: llm_model,
        messages: final_messages
      ]

    case OpenAI.chat_completion(input, %OpenAI.Config{
           api_url: llm_api_url,
           api_key: api_key,
           http_options: [
             # connect timeout
             timeout: 60_000,
             # response timeout
             recv_timeout: 60_000
           ]
         }) do
      {:ok, %{choices: [%{"message" => %{"content" => content}} | _]}} ->
        save_comment(answer.id, system_prompt, user_prompt, content)

        # The LLM separates alternative comments with "|||"; drop any
        # whitespace-only segments before returning them to the client.
        filtered_comments =
          content
          |> String.split("|||")
          |> Enum.filter(fn comment -> String.trim(comment) != "" end)

        json(conn, %{"comments" => filtered_comments})

      {:ok, other} ->
        # HTTP call succeeded but the payload shape is not what we expect;
        # persist the raw payload for debugging.
        save_comment(
          answer.id,
          system_prompt,
          user_prompt,
          Jason.encode!(other),
          "Unexpected JSON shape"
        )

        conn
        |> put_status(:bad_gateway)
        |> text("Unexpected response format from LLM")

      {:error, reason} ->
        # Transport/API failure: record it with a nil response.
        save_comment(answer.id, system_prompt, user_prompt, nil, inspect(reason))

        conn
        |> put_status(:internal_server_error)
        |> text("LLM request error: #{inspect(reason)}")
    end
  end
275

276
  @doc """
277
  Saves the final comment chosen for a submission.
278
  """
279
  def save_final_comment(conn, %{
280
        "answer_id" => answer_id,
281
        "comment" => comment
282
      }) do
NEW
283
    case AIComments.update_final_comment(answer_id, comment) do
×
284
      {:ok, _updated_comment} ->
NEW
285
        json(conn, %{"status" => "success"})
×
286

287
      {:error, changeset} ->
288
        conn
289
        |> put_status(:unprocessable_entity)
NEW
290
        |> text("Failed to save final comment")
×
291
    end
292
  end
293

NEW
294
  # Swagger documentation for generate_ai_comments/2.
  swagger_path :generate_ai_comments do
    post("/courses/{course_id}/admin/generate-comments/{answer_id}")

    summary("Generate AI comments for a given submission.")

    security([%{JWT: []}])

    consumes("application/json")
    produces("application/json")

    parameters do
      course_id(:path, :integer, "course id", required: true)
      answer_id(:path, :integer, "answer id", required: true)
    end

    response(200, "OK", Schema.ref(:GenerateAIComments))
    response(400, "Invalid or missing parameter(s) or submission and/or question not found")
    response(401, "Unauthorized")
    # Merged into a single 403 entry: Swagger keys responses by status code,
    # so the original two response(403, ...) calls overwrote each other.
    response(403, "Forbidden, or LLM grading is not enabled for this course")
  end
315

NEW
316
  # Swagger documentation for save_final_comment/2.
  swagger_path :save_final_comment do
    post("/courses/{course_id}/admin/save-final-comment/{answer_id}")

    summary("Save the final comment chosen for a submission.")

    security([%{JWT: []}])

    consumes("application/json")
    produces("application/json")

    parameters do
      course_id(:path, :integer, "course id", required: true)
      answer_id(:path, :integer, "answer id", required: true)
      comment(:body, :string, "The final comment to save", required: true)
    end

    response(200, "OK", Schema.ref(:SaveFinalComment))
    response(400, "Invalid or missing parameter(s)")
    response(401, "Unauthorized")
    response(403, "Forbidden")
  end
337

338
  def swagger_definitions do
NEW
339
    %{
×
340
      GenerateAIComments:
NEW
341
        swagger_schema do
×
NEW
342
          properties do
×
NEW
343
            comments(:string, "AI-generated comments on the submission answers")
×
344
          end
345
        end,
346
      SaveFinalComment:
NEW
347
        swagger_schema do
×
NEW
348
          properties do
×
NEW
349
            status(:string, "Status of the operation")
×
350
          end
351
        end
352
    }
353
  end
354
end
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc