• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

vzakharchenko / forge-sql-orm / 14385988882

10 Apr 2025 04:53PM UTC coverage: 79.009% (+2.5%) from 76.514%
14385988882

Pull #41

github

web-flow
Merge 4ed548675 into 0fa3fd4a5
Pull Request #41: 2.0.19

223 of 278 branches covered (80.22%)

Branch coverage included in aggregate %.

183 of 212 new or added lines in 7 files covered. (86.32%)

2 existing lines in 2 files now uncovered.

925 of 1175 relevant lines covered (78.72%)

19.13 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

88.73
/src/core/ForgeSQLAnalyseOperations.ts
1
import { ForgeSqlOperation, SchemaAnalyzeForgeSql } from "./ForgeSQLQueryBuilder";
2
import { Query } from "drizzle-orm";
3
import {
4
  ClusterStatementRowCamelCase,
5
  ExplainAnalyzeRow,
6
  SlowQueryNormalized,
7
} from "./SystemTables";
8
import { SqlParameters } from "@forge/sql/out/sql-statement";
9
import { AnyMySqlTable } from "drizzle-orm/mysql-core/index";
10
import { getTableName } from "drizzle-orm/table";
2✔
11
import moment from "moment";
2✔
12

13
/**
 * Interface representing a row from the EXPLAIN ANALYZE output
 *
 * Field names mirror the raw column names returned by the database driver;
 * several TiDB plan columns contain spaces, hence the quoted keys below.
 * All values arrive as strings; optional fields may be absent depending on
 * whether plain EXPLAIN or EXPLAIN ANALYZE was executed.
 */
interface DecodedPlanRow {
  // Plan operator identifier (the only column always present).
  id: string;
  // Optimizer estimates — presumably numeric values serialized as strings; TODO confirm.
  estRows?: string;
  estCost?: string;
  // Actual row count (EXPLAIN ANALYZE only).
  actRows?: string;
  task?: string;
  // Raw columns whose names contain spaces — remapped to camelCase in mapPlanRows/explain*.
  "access object"?: string;
  "execution info"?: string;
  "operator info"?: string;
  memory?: string;
  disk?: string;
}
28

29
/**
 * Interface representing a raw slow query row from the database
 *
 * Field names mirror the column names of `information_schema.slow_query`
 * (see analyzeSlowQueries), which uses Snake_case with a leading capital.
 * Rows are converted to camelCase via normalizeSlowQuery.
 */
interface SlowQueryRaw {
  // Identification of the query occurrence.
  Time: string;
  Txn_start_ts: number;
  User: string;
  Host: string;
  Conn_ID: number;
  DB: string;
  Query: string;
  Digest: string;
  // Timing breakdown — units not visible here; presumably seconds, TODO confirm.
  Query_time: number;
  Compile_time: number;
  Optimize_time: number;
  Process_time: number;
  Wait_time: number;
  Parse_time: number;
  Rewrite_time: number;
  // Coprocessor statistics.
  Cop_time: number;
  Cop_proc_avg: number;
  Cop_proc_max: number;
  Cop_proc_p90: number;
  Cop_proc_addr: string;
  Cop_wait_avg: number;
  Cop_wait_max: number;
  Cop_wait_p90: number;
  Cop_wait_addr: string;
  // Resource usage.
  Mem_max: number;
  Disk_max: number;
  Total_keys: number;
  Process_keys: number;
  Request_count: number;
  KV_total: number;
  PD_total: number;
  Result_rows: number;
  // RocksDB-level counters.
  Rocksdb_block_cache_hit_count: number;
  Rocksdb_block_read_count: number;
  Rocksdb_block_read_byte: number;
  // Execution plan in textual and binary form; Plan is parsed by decodedPlan.
  Plan: string;
  Binary_plan: string;
  Plan_digest: string;
}
72

73
/**
 * Interface representing a row from the cluster statements table
 *
 * Field names mirror the UPPER_SNAKE_CASE columns of
 * `INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY` (and its `_HISTORY`
 * counterpart) queried by buildClusterStatementQuery. Rows are converted
 * to camelCase via mapToCamelCaseClusterStatement.
 */
export interface ClusterStatementRow {
  // Statement identity and sampling metadata.
  INSTANCE: string;
  SUMMARY_BEGIN_TIME: string;
  SUMMARY_END_TIME: string;
  STMT_TYPE: string;
  SCHEMA_NAME: string;
  DIGEST: string;
  DIGEST_TEXT: string;
  TABLE_NAMES: string;
  INDEX_NAMES: string | null;
  SAMPLE_USER: string;
  // Execution counters.
  EXEC_COUNT: number;
  SUM_ERRORS: number;
  SUM_WARNINGS: number;
  // Latency statistics — units not visible here; presumably nanoseconds, TODO confirm.
  SUM_LATENCY: number;
  MAX_LATENCY: number;
  MIN_LATENCY: number;
  AVG_LATENCY: number;
  AVG_PARSE_LATENCY: number;
  MAX_PARSE_LATENCY: number;
  AVG_COMPILE_LATENCY: number;
  MAX_COMPILE_LATENCY: number;
  // Coprocessor task statistics.
  SUM_COP_TASK_NUM: number;
  MAX_COP_PROCESS_TIME: number;
  MAX_COP_PROCESS_ADDRESS: string;
  MAX_COP_WAIT_TIME: number;
  MAX_COP_WAIT_ADDRESS: string;
  AVG_PROCESS_TIME: number;
  MAX_PROCESS_TIME: number;
  AVG_WAIT_TIME: number;
  MAX_WAIT_TIME: number;
  AVG_BACKOFF_TIME: number;
  MAX_BACKOFF_TIME: number;
  // Key scanning statistics.
  AVG_TOTAL_KEYS: number;
  MAX_TOTAL_KEYS: number;
  AVG_PROCESSED_KEYS: number;
  MAX_PROCESSED_KEYS: number;
  // RocksDB-level counters.
  AVG_ROCKSDB_DELETE_SKIPPED_COUNT: number;
  MAX_ROCKSDB_DELETE_SKIPPED_COUNT: number;
  AVG_ROCKSDB_KEY_SKIPPED_COUNT: number;
  MAX_ROCKSDB_KEY_SKIPPED_COUNT: number;
  AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
  MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
  AVG_ROCKSDB_BLOCK_READ_COUNT: number;
  MAX_ROCKSDB_BLOCK_READ_COUNT: number;
  AVG_ROCKSDB_BLOCK_READ_BYTE: number;
  MAX_ROCKSDB_BLOCK_READ_BYTE: number;
  // Transaction commit phase statistics.
  AVG_PREWRITE_TIME: number;
  MAX_PREWRITE_TIME: number;
  AVG_COMMIT_TIME: number;
  MAX_COMMIT_TIME: number;
  AVG_GET_COMMIT_TS_TIME: number;
  MAX_GET_COMMIT_TS_TIME: number;
  AVG_COMMIT_BACKOFF_TIME: number;
  MAX_COMMIT_BACKOFF_TIME: number;
  AVG_RESOLVE_LOCK_TIME: number;
  MAX_RESOLVE_LOCK_TIME: number;
  AVG_LOCAL_LATCH_WAIT_TIME: number;
  MAX_LOCAL_LATCH_WAIT_TIME: number;
  AVG_WRITE_KEYS: number;
  MAX_WRITE_KEYS: number;
  AVG_WRITE_SIZE: number;
  MAX_WRITE_SIZE: number;
  AVG_PREWRITE_REGIONS: number;
  MAX_PREWRITE_REGIONS: number;
  AVG_TXN_RETRY: number;
  MAX_TXN_RETRY: number;
  SUM_EXEC_RETRY: number;
  SUM_EXEC_RETRY_TIME: number;
  SUM_BACKOFF_TIMES: number;
  BACKOFF_TYPES: string | null;
  // Memory / disk usage.
  AVG_MEM: number;
  MAX_MEM: number;
  AVG_DISK: number;
  MAX_DISK: number;
  AVG_KV_TIME: number;
  AVG_PD_TIME: number;
  AVG_BACKOFF_TOTAL_TIME: number;
  AVG_WRITE_SQL_RESP_TIME: number;
  AVG_TIDB_CPU_TIME: number;
  AVG_TIKV_CPU_TIME: number;
  // Result-set statistics.
  MAX_RESULT_ROWS: number;
  MIN_RESULT_ROWS: number;
  AVG_RESULT_ROWS: number;
  PREPARED: number;
  AVG_AFFECTED_ROWS: number;
  FIRST_SEEN: string;
  LAST_SEEN: string;
  // Plan cache / binding state.
  PLAN_IN_CACHE: number;
  PLAN_CACHE_HITS: number;
  PLAN_IN_BINDING: number;
  // Sample text and execution plan; PLAN is parsed by decodedPlan.
  QUERY_SAMPLE_TEXT: string;
  PREV_SAMPLE_TEXT: string;
  PLAN_DIGEST: string;
  PLAN: string;
  BINARY_PLAN: string;
  CHARSET: string;
  COLLATION: string;
  PLAN_HINT: string;
  // Resource-control statistics.
  MAX_REQUEST_UNIT_READ: number;
  AVG_REQUEST_UNIT_READ: number;
  MAX_REQUEST_UNIT_WRITE: number;
  AVG_REQUEST_UNIT_WRITE: number;
  MAX_QUEUED_RC_TIME: number;
  AVG_QUEUED_RC_TIME: number;
  RESOURCE_GROUP: string;
  PLAN_CACHE_UNQUALIFIED: number;
  PLAN_CACHE_UNQUALIFIED_LAST_REASON: string;
}
185

186
/**
187
 * Class implementing SQL analysis operations for ForgeSQL ORM.
188
 * Provides methods for analyzing query performance, execution plans, and slow queries.
189
 */
190
export class ForgeSQLAnalyseOperation implements SchemaAnalyzeForgeSql {
2✔
191
  private readonly forgeOperations: ForgeSqlOperation;
32✔
192

193
  /**
194
   * Creates a new instance of ForgeSQLAnalizeOperation.
195
   * @param {ForgeSqlOperation} forgeOperations - The ForgeSQL operations instance
196
   */
197
  constructor(forgeOperations: ForgeSqlOperation) {
32✔
198
    this.forgeOperations = forgeOperations;
32✔
199
    this.mapToCamelCaseClusterStatement = this.mapToCamelCaseClusterStatement.bind(this);
32✔
200
  }
32✔
201

202
  /**
203
   * Executes EXPLAIN on a raw SQL query.
204
   * @param {string} query - The SQL query to analyze
205
   * @param {unknown[]} bindParams - The query parameters
206
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
207
   */
208
  async explainRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
32✔
NEW
209
    const results = await this.forgeOperations
×
NEW
210
      .fetch()
×
NEW
211
      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ${query}`, bindParams as SqlParameters);
×
NEW
212
    console.error(results);
×
NEW
213
    return results.map((row) => ({
×
NEW
214
      id: row.id,
×
NEW
215
      estRows: row.estRows,
×
NEW
216
      actRows: row.actRows,
×
NEW
217
      task: row.task,
×
NEW
218
      accessObject: row["access object"],
×
NEW
219
      executionInfo: row["execution info"],
×
NEW
220
      operatorInfo: row["operator info"],
×
NEW
221
      memory: row.memory,
×
NEW
222
      disk: row.disk,
×
NEW
223
    }));
×
NEW
224
  }
×
225

226
  /**
227
   * Executes EXPLAIN on a Drizzle query.
228
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
229
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
230
   */
231
  async explain(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
32✔
NEW
232
    const { sql, params } = query.toSQL();
×
NEW
233
    return this.explainRaw(sql, params);
×
NEW
234
  }
×
235

236
  /**
237
   * Executes EXPLAIN ANALYZE on a raw SQL query.
238
   * @param {string} query - The SQL query to analyze
239
   * @param {unknown[]} bindParams - The query parameters
240
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
241
   */
242
  async explainAnalyzeRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
32✔
243
    const results = await this.forgeOperations
6✔
244
      .fetch()
6✔
245
      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ANALYZE ${query}`, bindParams as SqlParameters);
6✔
246
    return results.map((row) => ({
6✔
247
      id: row.id,
4✔
248
      estRows: row.estRows,
4✔
249
      actRows: row.actRows,
4✔
250
      task: row.task,
4✔
251
      accessObject: row["access object"],
4✔
252
      executionInfo: row["execution info"],
4✔
253
      operatorInfo: row["operator info"],
4✔
254
      memory: row.memory,
4✔
255
      disk: row.disk,
4✔
256
    }));
6✔
257
  }
6✔
258

259
  /**
260
   * Executes EXPLAIN ANALYZE on a Drizzle query.
261
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
262
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
263
   */
264
  async explainAnalyze(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
32✔
265
    const { sql, params } = query.toSQL();
2✔
266
    return this.explainAnalyzeRaw(sql, params);
2✔
267
  }
2✔
268

269
  /**
270
   * Decodes a query execution plan from its string representation.
271
   * @param {string} input - The raw execution plan string
272
   * @returns {ExplainAnalyzeRow[]} The decoded execution plan rows
273
   */
274
  decodedPlan(input: string): ExplainAnalyzeRow[] {
32✔
275
    if (!input) {
18✔
276
      return [];
6✔
277
    }
6✔
278
    const lines = input.trim().split("\n");
12✔
279
    if (lines.length < 2) return [];
18✔
280

281
    const headersRaw = lines[0]
2✔
282
      .split("\t")
2✔
283
      .map((h) => h.trim())
2✔
284
      .filter(Boolean);
2✔
285
    const headers = headersRaw.map((h) => {
2✔
286
      return h
18✔
287
        .replace(/\s+/g, " ")
18✔
288
        .replace(/[-\s]+(.)?/g, (_, c) => (c ? c.toUpperCase() : ""))
18!
289
        .replace(/^./, (s) => s.toLowerCase());
18✔
290
    });
2✔
291

292
    return lines.slice(1).map((line) => {
2✔
293
      const values = line
2✔
294
        .split("\t")
2✔
295
        .map((s) => s.trim())
2✔
296
        .filter(Boolean);
2✔
297
      const row: Record<string, string> = {};
2✔
298
      headers.forEach((key, i) => {
2✔
299
        row[key] = values[i] ?? "";
18!
300
      });
2✔
301
      return row as unknown as ExplainAnalyzeRow;
2✔
302
    });
2✔
303
  }
18✔
304

305
  /**
306
   * Normalizes a raw slow query row into a more structured format.
307
   * @param {SlowQueryRaw} row - The raw slow query data
308
   * @returns {SlowQueryNormalized} The normalized slow query data
309
   */
310
  normalizeSlowQuery(row: SlowQueryRaw): SlowQueryNormalized {
32✔
311
    return {
4✔
312
      time: row.Time,
4✔
313
      txnStartTs: row.Txn_start_ts,
4✔
314
      user: row.User,
4✔
315
      host: row.Host,
4✔
316
      connId: row.Conn_ID,
4✔
317
      db: row.DB,
4✔
318
      query: row.Query,
4✔
319
      digest: row.Digest,
4✔
320
      queryTime: row.Query_time,
4✔
321
      compileTime: row.Compile_time,
4✔
322
      optimizeTime: row.Optimize_time,
4✔
323
      processTime: row.Process_time,
4✔
324
      waitTime: row.Wait_time,
4✔
325
      parseTime: row.Parse_time,
4✔
326
      rewriteTime: row.Rewrite_time,
4✔
327
      copTime: row.Cop_time,
4✔
328
      copProcAvg: row.Cop_proc_avg,
4✔
329
      copProcMax: row.Cop_proc_max,
4✔
330
      copProcP90: row.Cop_proc_p90,
4✔
331
      copProcAddr: row.Cop_proc_addr,
4✔
332
      copWaitAvg: row.Cop_wait_avg,
4✔
333
      copWaitMax: row.Cop_wait_max,
4✔
334
      copWaitP90: row.Cop_wait_p90,
4✔
335
      copWaitAddr: row.Cop_wait_addr,
4✔
336
      memMax: row.Mem_max,
4✔
337
      diskMax: row.Disk_max,
4✔
338
      totalKeys: row.Total_keys,
4✔
339
      processKeys: row.Process_keys,
4✔
340
      requestCount: row.Request_count,
4✔
341
      kvTotal: row.KV_total,
4✔
342
      pdTotal: row.PD_total,
4✔
343
      resultRows: row.Result_rows,
4✔
344
      rocksdbBlockCacheHitCount: row.Rocksdb_block_cache_hit_count,
4✔
345
      rocksdbBlockReadCount: row.Rocksdb_block_read_count,
4✔
346
      rocksdbBlockReadByte: row.Rocksdb_block_read_byte,
4✔
347
      plan: row.Plan,
4✔
348
      binaryPlan: row.Binary_plan,
4✔
349
      planDigest: row.Plan_digest,
4✔
350
      parsedPlan: this.decodedPlan(row.Plan),
4✔
351
    };
4✔
352
  }
4✔
353

354
  /**
355
   * Builds a SQL query for retrieving cluster statement history.
356
   * @param {string[]} tables - The tables to analyze
357
   * @param {Date} [from] - The start date for the analysis
358
   * @param {Date} [to] - The end date for the analysis
359
   * @returns {string} The SQL query for cluster statement history
360
   */
361
  buildClusterStatementQuery(tables: string[], from?: Date, to?: Date): string {
32✔
362
    const formatDateTime = (date: Date): string => moment(date).format("YYYY-MM-DDTHH:mm:ss.SSS");
8✔
363

364
    const tableConditions = tables
8✔
365
      .map((table) => `TABLE_NAMES = CONCAT(SCHEMA_NAME, '.', '${table}')`)
8✔
366
      .join(" OR ");
8✔
367

368
    const timeConditions: string[] = [];
8✔
369
    if (from) {
8✔
370
      timeConditions.push(`SUMMARY_BEGIN_TIME >= '${formatDateTime(from)}'`);
4✔
371
    }
4✔
372
    if (to) {
8✔
373
      timeConditions.push(`SUMMARY_END_TIME <= '${formatDateTime(to)}'`);
4✔
374
    }
4✔
375

376
    const whereClauses = [`(${tableConditions})`, ...timeConditions];
8✔
377

378
    return `
8✔
379
      SELECT *
380
      FROM (
381
        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY
382
        UNION ALL
383
        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
384
      ) AS combined
385
      WHERE ${whereClauses.join(" AND ")}
8✔
386
    `;
387
  }
8✔
388

389
  /**
390
   * Retrieves and analyzes slow queries from the database.
391
   * @returns {Promise<SlowQueryNormalized[]>} The normalized slow query data
392
   */
393
  async analyzeSlowQueries(): Promise<SlowQueryNormalized[]> {
32✔
394
    const results = await this.forgeOperations.fetch().executeRawSQL<SlowQueryRaw>(`
2✔
395
      SELECT *
396
      FROM information_schema.slow_query
397
      ORDER BY time DESC
398
    `);
2✔
399
    return results.map((row) => this.normalizeSlowQuery(row));
2✔
400
  }
2✔
401

402
  /**
403
   * Converts a cluster statement row to camelCase format.
404
   * @param {Record<string, any>} input - The input row data
405
   * @returns {ClusterStatementRowCamelCase} The converted row data
406
   */
407
  mapToCamelCaseClusterStatement(input: Record<string, any>): ClusterStatementRowCamelCase {
32✔
408
    if (!input) {
8!
NEW
409
      return {} as ClusterStatementRowCamelCase;
×
NEW
410
    }
×
411

412
    const result: any = {};
8✔
413
    result.parsedPlan = this.decodedPlan(input["PLAN"] ?? "");
8✔
414
    for (const key in input) {
8✔
415
      const camelKey = key.toLowerCase().replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
18✔
416
      result[camelKey] = input[key];
18✔
417
    }
18✔
418

419
    return result as ClusterStatementRowCamelCase;
8✔
420
  }
8✔
421

422
  /**
423
   * Analyzes query history for specific tables using raw table names.
424
   * @param {string[]} tables - The table names to analyze
425
   * @param {Date} [fromDate] - The start date for the analysis
426
   * @param {Date} [toDate] - The end date for the analysis
427
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
428
   */
429
  async analyzeQueriesHistoryRaw(
32✔
430
    tables: string[],
4✔
431
    fromDate?: Date,
4✔
432
    toDate?: Date,
4✔
433
  ): Promise<ClusterStatementRowCamelCase[]> {
4✔
434
    const results = await this.forgeOperations
4✔
435
      .fetch()
4✔
436
      .executeRawSQL<ClusterStatementRow>(
4✔
437
        this.buildClusterStatementQuery(tables, fromDate, toDate),
4✔
438
      );
4✔
439
    return results.map((r) => this.mapToCamelCaseClusterStatement(r));
4✔
440
  }
4✔
441

442
  /**
443
   * Analyzes query history for specific tables using Drizzle table objects.
444
   * @param {AnyMySqlTable[]} tables - The Drizzle table objects to analyze
445
   * @param {Date} [fromDate] - The start date for the analysis
446
   * @param {Date} [toDate] - The end date for the analysis
447
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
448
   */
449
  async analyzeQueriesHistory(
32✔
450
    tables: AnyMySqlTable[],
2✔
451
    fromDate?: Date,
2✔
452
    toDate?: Date,
2✔
453
  ): Promise<ClusterStatementRowCamelCase[]> {
2✔
454
    return this.analyzeQueriesHistoryRaw(
2✔
455
      tables.map((table) => getTableName(table)),
2✔
456
      fromDate,
2✔
457
      toDate,
2✔
458
    );
2✔
459
  }
2✔
460
}
32✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc