• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

vzakharchenko / forge-sql-orm / 14390590765

10 Apr 2025 09:23PM UTC coverage: 78.699% (+2.2%) from 76.514%
14390590765

Pull #41

github

web-flow
Merge 0fd5d2821 into 0fa3fd4a5
Pull Request #41: 2.0.19

223 of 283 branches covered (78.8%)

Branch coverage included in aggregate %.

184 of 215 new or added lines in 8 files covered. (85.58%)

3 existing lines in 2 files now uncovered.

926 of 1177 relevant lines covered (78.67%)

19.11 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

86.3
/src/core/ForgeSQLAnalyseOperations.ts
1
import { ForgeSqlOperation, SchemaAnalyzeForgeSql } from "./ForgeSQLQueryBuilder";
2
import { Query } from "drizzle-orm";
3
import {
4
  ClusterStatementRowCamelCase,
5
  ExplainAnalyzeRow,
6
  SlowQueryNormalized,
7
} from "./SystemTables";
8
import { SqlParameters } from "@forge/sql/out/sql-statement";
9
import { AnyMySqlTable } from "drizzle-orm/mysql-core/index";
10
import { getTableName } from "drizzle-orm/table";
2✔
11
import moment from "moment";
2✔
12

13
/**
 * Interface representing a row from the EXPLAIN ANALYZE output.
 *
 * Property names mirror the raw column headers returned by the database,
 * which is why some keys contain spaces and must be quoted.
 */
interface DecodedPlanRow {
  // Operator identifier within the execution plan tree.
  id: string;
  // Estimated rows produced by the operator (returned as a string).
  estRows?: string;
  // Estimated cost of the operator (returned as a string).
  estCost?: string;
  // Actual rows produced — populated by EXPLAIN ANALYZE.
  actRows?: string;
  // Where the operator runs — presumably root/cop tasks; TODO confirm against DB docs.
  task?: string;
  "access object"?: string;
  "execution info"?: string;
  "operator info"?: string;
  // Memory usage as reported by the database (string-formatted).
  memory?: string;
  // Disk usage as reported by the database (string-formatted).
  disk?: string;
}
28

29
/**
 * Interface representing a raw slow query row from the database.
 *
 * Field names mirror the column names of `information_schema.slow_query`
 * (see {@link ForgeSQLAnalyseOperation.analyzeSlowQueries}), hence the
 * Snake_case naming.
 */
interface SlowQueryRaw {
  // --- Identity / context of the statement ---
  Time: string;
  Txn_start_ts: number;
  User: string;
  Host: string;
  Conn_ID: number;
  DB: string;
  Query: string;
  Digest: string;
  // --- Timing breakdown (per execution phase) ---
  Query_time: number;
  Compile_time: number;
  Optimize_time: number;
  Process_time: number;
  Wait_time: number;
  Parse_time: number;
  Rewrite_time: number;
  // --- Coprocessor statistics ---
  Cop_time: number;
  Cop_proc_avg: number;
  Cop_proc_max: number;
  Cop_proc_p90: number;
  Cop_proc_addr: string;
  Cop_wait_avg: number;
  Cop_wait_max: number;
  Cop_wait_p90: number;
  Cop_wait_addr: string;
  // --- Resource usage ---
  Mem_max: number;
  Disk_max: number;
  Total_keys: number;
  Process_keys: number;
  Request_count: number;
  KV_total: number;
  PD_total: number;
  Result_rows: number;
  // --- Storage-engine (RocksDB) counters ---
  Rocksdb_block_cache_hit_count: number;
  Rocksdb_block_read_count: number;
  Rocksdb_block_read_byte: number;
  // --- Execution plan (Plan is tab-separated text; see decodedPlan) ---
  Plan: string;
  Binary_plan: string;
  Plan_digest: string;
}
72

73
/**
 * Interface representing a row from the cluster statements table.
 *
 * Field names mirror the columns of
 * `INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY` /
 * `CLUSTER_STATEMENTS_SUMMARY_HISTORY` (see
 * {@link ForgeSQLAnalyseOperation.buildClusterStatementQuery}), hence the
 * SCREAMING_SNAKE_CASE naming. Rows are converted to camelCase by
 * {@link ForgeSQLAnalyseOperation.mapToCamelCaseClusterStatement}.
 */
export interface ClusterStatementRow {
  // --- Identity / aggregation window ---
  INSTANCE: string;
  SUMMARY_BEGIN_TIME: string;
  SUMMARY_END_TIME: string;
  STMT_TYPE: string;
  SCHEMA_NAME: string;
  DIGEST: string;
  DIGEST_TEXT: string;
  TABLE_NAMES: string;
  INDEX_NAMES: string | null;
  SAMPLE_USER: string;
  // --- Execution counters ---
  EXEC_COUNT: number;
  SUM_ERRORS: number;
  SUM_WARNINGS: number;
  // --- Latency aggregates ---
  SUM_LATENCY: number;
  MAX_LATENCY: number;
  MIN_LATENCY: number;
  AVG_LATENCY: number;
  AVG_PARSE_LATENCY: number;
  MAX_PARSE_LATENCY: number;
  AVG_COMPILE_LATENCY: number;
  MAX_COMPILE_LATENCY: number;
  // --- Coprocessor statistics ---
  SUM_COP_TASK_NUM: number;
  MAX_COP_PROCESS_TIME: number;
  MAX_COP_PROCESS_ADDRESS: string;
  MAX_COP_WAIT_TIME: number;
  MAX_COP_WAIT_ADDRESS: string;
  AVG_PROCESS_TIME: number;
  MAX_PROCESS_TIME: number;
  AVG_WAIT_TIME: number;
  MAX_WAIT_TIME: number;
  AVG_BACKOFF_TIME: number;
  MAX_BACKOFF_TIME: number;
  // --- Key scan counters ---
  AVG_TOTAL_KEYS: number;
  MAX_TOTAL_KEYS: number;
  AVG_PROCESSED_KEYS: number;
  MAX_PROCESSED_KEYS: number;
  // --- Storage-engine (RocksDB) counters ---
  AVG_ROCKSDB_DELETE_SKIPPED_COUNT: number;
  MAX_ROCKSDB_DELETE_SKIPPED_COUNT: number;
  AVG_ROCKSDB_KEY_SKIPPED_COUNT: number;
  MAX_ROCKSDB_KEY_SKIPPED_COUNT: number;
  AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
  MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
  AVG_ROCKSDB_BLOCK_READ_COUNT: number;
  MAX_ROCKSDB_BLOCK_READ_COUNT: number;
  AVG_ROCKSDB_BLOCK_READ_BYTE: number;
  MAX_ROCKSDB_BLOCK_READ_BYTE: number;
  // --- Transaction / commit statistics ---
  AVG_PREWRITE_TIME: number;
  MAX_PREWRITE_TIME: number;
  AVG_COMMIT_TIME: number;
  MAX_COMMIT_TIME: number;
  AVG_GET_COMMIT_TS_TIME: number;
  MAX_GET_COMMIT_TS_TIME: number;
  AVG_COMMIT_BACKOFF_TIME: number;
  MAX_COMMIT_BACKOFF_TIME: number;
  AVG_RESOLVE_LOCK_TIME: number;
  MAX_RESOLVE_LOCK_TIME: number;
  AVG_LOCAL_LATCH_WAIT_TIME: number;
  MAX_LOCAL_LATCH_WAIT_TIME: number;
  AVG_WRITE_KEYS: number;
  MAX_WRITE_KEYS: number;
  AVG_WRITE_SIZE: number;
  MAX_WRITE_SIZE: number;
  AVG_PREWRITE_REGIONS: number;
  MAX_PREWRITE_REGIONS: number;
  AVG_TXN_RETRY: number;
  MAX_TXN_RETRY: number;
  SUM_EXEC_RETRY: number;
  SUM_EXEC_RETRY_TIME: number;
  SUM_BACKOFF_TIMES: number;
  BACKOFF_TYPES: string | null;
  // --- Memory / disk usage ---
  AVG_MEM: number;
  MAX_MEM: number;
  AVG_DISK: number;
  MAX_DISK: number;
  // --- Component time breakdown ---
  AVG_KV_TIME: number;
  AVG_PD_TIME: number;
  AVG_BACKOFF_TOTAL_TIME: number;
  AVG_WRITE_SQL_RESP_TIME: number;
  AVG_TIDB_CPU_TIME: number;
  AVG_TIKV_CPU_TIME: number;
  // --- Result rows ---
  MAX_RESULT_ROWS: number;
  MIN_RESULT_ROWS: number;
  AVG_RESULT_ROWS: number;
  PREPARED: number;
  AVG_AFFECTED_ROWS: number;
  FIRST_SEEN: string;
  LAST_SEEN: string;
  // --- Plan cache / plan text (PLAN is tab-separated text; see decodedPlan) ---
  PLAN_IN_CACHE: number;
  PLAN_CACHE_HITS: number;
  PLAN_IN_BINDING: number;
  QUERY_SAMPLE_TEXT: string;
  PREV_SAMPLE_TEXT: string;
  PLAN_DIGEST: string;
  PLAN: string;
  BINARY_PLAN: string;
  CHARSET: string;
  COLLATION: string;
  PLAN_HINT: string;
  // --- Resource control metrics ---
  MAX_REQUEST_UNIT_READ: number;
  AVG_REQUEST_UNIT_READ: number;
  MAX_REQUEST_UNIT_WRITE: number;
  AVG_REQUEST_UNIT_WRITE: number;
  MAX_QUEUED_RC_TIME: number;
  AVG_QUEUED_RC_TIME: number;
  RESOURCE_GROUP: string;
  PLAN_CACHE_UNQUALIFIED: number;
  PLAN_CACHE_UNQUALIFIED_LAST_REASON: string;
}
185

186
/**
187
 * Class implementing SQL analysis operations for ForgeSQL ORM.
188
 * Provides methods for analyzing query performance, execution plans, and slow queries.
189
 */
190
export class ForgeSQLAnalyseOperation implements SchemaAnalyzeForgeSql {
2✔
191
  private readonly forgeOperations: ForgeSqlOperation;
32✔
192

193
  /**
194
   * Creates a new instance of ForgeSQLAnalizeOperation.
195
   * @param {ForgeSqlOperation} forgeOperations - The ForgeSQL operations instance
196
   */
197
  constructor(forgeOperations: ForgeSqlOperation) {
32✔
198
    this.forgeOperations = forgeOperations;
32✔
199
    this.mapToCamelCaseClusterStatement = this.mapToCamelCaseClusterStatement.bind(this);
32✔
200
  }
32✔
201

202
  /**
203
   * Executes EXPLAIN on a raw SQL query.
204
   * @param {string} query - The SQL query to analyze
205
   * @param {unknown[]} bindParams - The query parameters
206
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
207
   */
208
  async explainRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
32✔
NEW
209
    const results = await this.forgeOperations
×
NEW
210
      .fetch()
×
NEW
211
      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ${query}`, bindParams as SqlParameters);
×
NEW
212
    return results.map((row) => ({
×
NEW
213
      id: row.id,
×
NEW
214
      estRows: row.estRows,
×
NEW
215
      actRows: row.actRows,
×
NEW
216
      task: row.task,
×
NEW
217
      accessObject: row["access object"],
×
NEW
218
      executionInfo: row["execution info"],
×
NEW
219
      operatorInfo: row["operator info"],
×
NEW
220
      memory: row.memory,
×
NEW
221
      disk: row.disk,
×
NEW
222
    }));
×
NEW
223
  }
×
224

225
  /**
226
   * Executes EXPLAIN on a Drizzle query.
227
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
228
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
229
   */
230
  async explain(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
32✔
NEW
231
    const { sql, params } = query.toSQL();
×
NEW
232
    return this.explainRaw(sql, params);
×
NEW
233
  }
×
234

235
  /**
236
   * Executes EXPLAIN ANALYZE on a raw SQL query.
237
   * @param {string} query - The SQL query to analyze
238
   * @param {unknown[]} bindParams - The query parameters
239
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
240
   */
241
  async explainAnalyzeRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
32✔
242
    const results = await this.forgeOperations
6✔
243
      .fetch()
6✔
244
      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ANALYZE ${query}`, bindParams as SqlParameters);
6✔
245
    return results.map((row) => ({
6✔
246
      id: row.id,
4✔
247
      estRows: row.estRows,
4✔
248
      actRows: row.actRows,
4✔
249
      task: row.task,
4✔
250
      accessObject: row["access object"],
4✔
251
      executionInfo: row["execution info"],
4✔
252
      operatorInfo: row["operator info"],
4✔
253
      memory: row.memory,
4✔
254
      disk: row.disk,
4✔
255
    }));
6✔
256
  }
6✔
257

258
  /**
259
   * Executes EXPLAIN ANALYZE on a Drizzle query.
260
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
261
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
262
   */
263
  async explainAnalyze(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
32✔
264
    const { sql, params } = query.toSQL();
2✔
265
    return this.explainAnalyzeRaw(sql, params);
2✔
266
  }
2✔
267

268
  /**
269
   * Decodes a query execution plan from its string representation.
270
   * @param {string} input - The raw execution plan string
271
   * @returns {ExplainAnalyzeRow[]} The decoded execution plan rows
272
   */
273
  decodedPlan(input: string): ExplainAnalyzeRow[] {
32✔
274
    if (!input) {
18✔
275
      return [];
6✔
276
    }
6✔
277
    const lines = input.trim().split("\n");
12✔
278
    if (lines.length < 2) return [];
18✔
279

280
    const headersRaw = lines[0]
2✔
281
      .split("\t")
2✔
282
      .map((h) => h.trim())
2✔
283
      .filter(Boolean);
2✔
284
    const headers = headersRaw.map((h) => {
2✔
285
      return h
18✔
286
        .replace(/\s+/g, " ")
18✔
287
        .replace(/[-\s]+(.)?/g, (_, c) => (c ? c.toUpperCase() : ""))
18!
288
        .replace(/^./, (s) => s.toLowerCase());
18✔
289
    });
2✔
290

291
    return lines.slice(1).map((line) => {
2✔
292
      const values = line
2✔
293
        .split("\t")
2✔
294
        .map((s) => s.trim())
2✔
295
        .filter(Boolean);
2✔
296
      const row: Record<string, string> = {};
2✔
297
      headers.forEach((key, i) => {
2✔
298
        row[key] = values[i] ?? "";
18!
299
      });
2✔
300
      return row as unknown as ExplainAnalyzeRow;
2✔
301
    });
2✔
302
  }
18✔
303

304
  /**
305
   * Normalizes a raw slow query row into a more structured format.
306
   * @param {SlowQueryRaw} row - The raw slow query data
307
   * @returns {SlowQueryNormalized} The normalized slow query data
308
   */
309
  normalizeSlowQuery(row: SlowQueryRaw): SlowQueryNormalized {
32✔
310
    return {
4✔
311
      time: row.Time,
4✔
312
      txnStartTs: row.Txn_start_ts,
4✔
313
      user: row.User,
4✔
314
      host: row.Host,
4✔
315
      connId: row.Conn_ID,
4✔
316
      db: row.DB,
4✔
317
      query: row.Query,
4✔
318
      digest: row.Digest,
4✔
319
      queryTime: row.Query_time,
4✔
320
      compileTime: row.Compile_time,
4✔
321
      optimizeTime: row.Optimize_time,
4✔
322
      processTime: row.Process_time,
4✔
323
      waitTime: row.Wait_time,
4✔
324
      parseTime: row.Parse_time,
4✔
325
      rewriteTime: row.Rewrite_time,
4✔
326
      copTime: row.Cop_time,
4✔
327
      copProcAvg: row.Cop_proc_avg,
4✔
328
      copProcMax: row.Cop_proc_max,
4✔
329
      copProcP90: row.Cop_proc_p90,
4✔
330
      copProcAddr: row.Cop_proc_addr,
4✔
331
      copWaitAvg: row.Cop_wait_avg,
4✔
332
      copWaitMax: row.Cop_wait_max,
4✔
333
      copWaitP90: row.Cop_wait_p90,
4✔
334
      copWaitAddr: row.Cop_wait_addr,
4✔
335
      memMax: row.Mem_max,
4✔
336
      diskMax: row.Disk_max,
4✔
337
      totalKeys: row.Total_keys,
4✔
338
      processKeys: row.Process_keys,
4✔
339
      requestCount: row.Request_count,
4✔
340
      kvTotal: row.KV_total,
4✔
341
      pdTotal: row.PD_total,
4✔
342
      resultRows: row.Result_rows,
4✔
343
      rocksdbBlockCacheHitCount: row.Rocksdb_block_cache_hit_count,
4✔
344
      rocksdbBlockReadCount: row.Rocksdb_block_read_count,
4✔
345
      rocksdbBlockReadByte: row.Rocksdb_block_read_byte,
4✔
346
      plan: row.Plan,
4✔
347
      binaryPlan: row.Binary_plan,
4✔
348
      planDigest: row.Plan_digest,
4✔
349
      parsedPlan: this.decodedPlan(row.Plan),
4✔
350
    };
4✔
351
  }
4✔
352

353
  /**
354
   * Builds a SQL query for retrieving cluster statement history.
355
   * @param {string[]} tables - The tables to analyze
356
   * @param {Date} [from] - The start date for the analysis
357
   * @param {Date} [to] - The end date for the analysis
358
   * @returns {string} The SQL query for cluster statement history
359
   */
360
  buildClusterStatementQuery(tables: string[], from?: Date, to?: Date): string {
32✔
361
    const formatDateTime = (date: Date): string => moment(date).format("YYYY-MM-DDTHH:mm:ss.SSS");
8✔
362

363
    const tableConditions = tables
8✔
364
      .map((table) => `TABLE_NAMES LIKE CONCAT(SCHEMA_NAME, '.', '%', '${table}', '%')`)
8✔
365
      .join(" OR ");
8✔
366

367
    const timeConditions: string[] = [];
8✔
368
    if (from) {
8✔
369
      timeConditions.push(`SUMMARY_BEGIN_TIME >= '${formatDateTime(from)}'`);
4✔
370
    }
4✔
371
    if (to) {
8✔
372
      timeConditions.push(`SUMMARY_END_TIME <= '${formatDateTime(to)}'`);
4✔
373
    }
4✔
374

375
    let whereClauses;
8✔
376
    if (tableConditions?.length) {
8✔
377
      whereClauses = [tableConditions ? `(${tableConditions})` : "", ...timeConditions];
8!
378
    } else {
8!
NEW
379
      whereClauses = timeConditions;
×
NEW
380
    }
×
381

382
    return `
8✔
383
      SELECT *
384
      FROM (
385
        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY
386
        UNION ALL
387
        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
388
      ) AS combined
389
      ${whereClauses?.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : ""}
8!
390
    `;
391
  }
8✔
392

393
  /**
394
   * Retrieves and analyzes slow queries from the database.
395
   * @returns {Promise<SlowQueryNormalized[]>} The normalized slow query data
396
   */
397
  async analyzeSlowQueries(): Promise<SlowQueryNormalized[]> {
32✔
398
    const results = await this.forgeOperations.fetch().executeRawSQL<SlowQueryRaw>(`
2✔
399
      SELECT *
400
      FROM information_schema.slow_query
401
      ORDER BY time DESC
402
    `);
2✔
403
    return results.map((row) => this.normalizeSlowQuery(row));
2✔
404
  }
2✔
405

406
  /**
407
   * Converts a cluster statement row to camelCase format.
408
   * @param {Record<string, any>} input - The input row data
409
   * @returns {ClusterStatementRowCamelCase} The converted row data
410
   */
411
  mapToCamelCaseClusterStatement(input: Record<string, any>): ClusterStatementRowCamelCase {
32✔
412
    if (!input) {
8!
NEW
413
      return {} as ClusterStatementRowCamelCase;
×
NEW
414
    }
×
415

416
    const result: any = {};
8✔
417
    result.parsedPlan = this.decodedPlan(input["PLAN"] ?? "");
8✔
418
    for (const key in input) {
8✔
419
      const camelKey = key.toLowerCase().replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
18✔
420
      result[camelKey] = input[key];
18✔
421
    }
18✔
422

423
    return result as ClusterStatementRowCamelCase;
8✔
424
  }
8✔
425

426
  /**
427
   * Analyzes query history for specific tables using raw table names.
428
   * @param {string[]} tables - The table names to analyze
429
   * @param {Date} [fromDate] - The start date for the analysis
430
   * @param {Date} [toDate] - The end date for the analysis
431
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
432
   */
433
  async analyzeQueriesHistoryRaw(
32✔
434
    tables: string[],
4✔
435
    fromDate?: Date,
4✔
436
    toDate?: Date,
4✔
437
  ): Promise<ClusterStatementRowCamelCase[]> {
4✔
438
    const results = await this.forgeOperations
4✔
439
      .fetch()
4✔
440
      .executeRawSQL<ClusterStatementRow>(
4✔
441
        this.buildClusterStatementQuery(tables ?? [], fromDate, toDate),
4!
442
      );
4✔
443
    return results.map((r) => this.mapToCamelCaseClusterStatement(r));
4✔
444
  }
4✔
445

446
  /**
447
   * Analyzes query history for specific tables using Drizzle table objects.
448
   * @param {AnyMySqlTable[]} tables - The Drizzle table objects to analyze
449
   * @param {Date} [fromDate] - The start date for the analysis
450
   * @param {Date} [toDate] - The end date for the analysis
451
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
452
   */
453
  async analyzeQueriesHistory(
32✔
454
    tables: AnyMySqlTable[],
2✔
455
    fromDate?: Date,
2✔
456
    toDate?: Date,
2✔
457
  ): Promise<ClusterStatementRowCamelCase[]> {
2✔
458
    const tableNames = tables?.map((table) => getTableName(table)) ?? [];
2!
459
    return this.analyzeQueriesHistoryRaw(tableNames, fromDate, toDate);
2✔
460
  }
2✔
461
}
32✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc