• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

vzakharchenko / forge-sql-orm / 14383914811

10 Apr 2025 03:07PM UTC coverage: 80.028% (+3.5%) from 76.514%
14383914811

Pull #41

github

web-flow
Merge 55203ac8d into 0fa3fd4a5
Pull Request #41: 2.0.19

223 of 278 branches covered (80.22%)

Branch coverage included in aggregate %.

181 of 191 new or added lines in 7 files covered. (94.76%)

2 existing lines in 2 files now uncovered.

923 of 1154 relevant lines covered (79.98%)

19.42 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.4
/src/core/ForgeSQLAnalizeOperations.ts
1
import { ForgeSqlOperation, SchemaAnalyzeForgeSql } from "./ForgeSQLQueryBuilder";
2
import { Query } from "drizzle-orm";
3
import {
4
  ClusterStatementRowCamelCase,
5
  ExplainAnalyzeRow,
6
  SlowQueryNormalized,
7
} from "./SystemTables";
8
import { SqlParameters } from "@forge/sql/out/sql-statement";
9
import { AnyMySqlTable } from "drizzle-orm/mysql-core/index";
10
import { getTableName } from "drizzle-orm/table";
2✔
11
import moment from "moment";
2✔
12

13
/**
14
 * Interface representing a row from the EXPLAIN ANALYZE output
15
 */
16
/**
 * Raw row shape returned by `EXPLAIN ANALYZE` before camelCase normalization.
 * Keys containing spaces ("access object", "execution info", "operator info")
 * match the database column names verbatim; explainAnalyzeRaw maps them to
 * camelCase fields of ExplainAnalyzeRow.
 */
interface DecodedPlanRow {
  id: string; // operator identifier in the plan tree
  estRows?: string; // optimizer-estimated row count
  estCost?: string; // optimizer-estimated cost (not propagated by explainAnalyzeRaw)
  actRows?: string; // actual rows observed during execution
  task?: string; // where the operator runs — presumably root/cop; TODO confirm
  "access object"?: string; // table/index touched by the operator
  "execution info"?: string; // runtime execution statistics
  "operator info"?: string; // operator-specific details
  memory?: string; // memory usage reported for the operator
  disk?: string; // disk usage reported for the operator
}
28

29
/**
30
 * Interface representing a raw slow query row from the database
31
 */
32
/**
 * Raw row shape of `information_schema.slow_query` as selected by
 * {@link ForgeSQLAnalizeOperation.analyzeSlowQueries}. Field names mirror the
 * database column names exactly; normalizeSlowQuery converts them to camelCase.
 * NOTE(review): time fields are presumably durations in seconds — confirm
 * against the database documentation before relying on units.
 */
interface SlowQueryRaw {
  // Identification / context
  Time: string;
  Txn_start_ts: number;
  User: string;
  Host: string;
  Conn_ID: number;
  DB: string;
  Query: string;
  Digest: string;
  // Phase timings
  Query_time: number;
  Compile_time: number;
  Optimize_time: number;
  Process_time: number;
  Wait_time: number;
  Parse_time: number;
  Rewrite_time: number;
  // Coprocessor statistics
  Cop_time: number;
  Cop_proc_avg: number;
  Cop_proc_max: number;
  Cop_proc_p90: number;
  Cop_proc_addr: string;
  Cop_wait_avg: number;
  Cop_wait_max: number;
  Cop_wait_p90: number;
  Cop_wait_addr: string;
  // Resource usage
  Mem_max: number;
  Disk_max: number;
  Total_keys: number;
  Process_keys: number;
  Request_count: number;
  KV_total: number;
  PD_total: number;
  Result_rows: number;
  // RocksDB-level counters
  Rocksdb_block_cache_hit_count: number;
  Rocksdb_block_read_count: number;
  Rocksdb_block_read_byte: number;
  // Execution plan (Plan is parsed by decodedPlan into parsedPlan)
  Plan: string;
  Binary_plan: string;
  Plan_digest: string;
}
72

73
/**
74
 * Interface representing a row from the cluster statements table
75
 */
76
/**
 * Raw row shape of `INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY` /
 * `CLUSTER_STATEMENTS_SUMMARY_HISTORY`, as selected by
 * {@link ForgeSQLAnalizeOperation.buildClusterStatementQuery}. Field names
 * mirror the database column names; mapToCamelCaseClusterStatement converts
 * them to camelCase. NOTE(review): latency/time columns are presumably in
 * nanoseconds — confirm against the database documentation.
 */
export interface ClusterStatementRow {
  // Identification / aggregation window
  INSTANCE: string;
  SUMMARY_BEGIN_TIME: string;
  SUMMARY_END_TIME: string;
  STMT_TYPE: string;
  SCHEMA_NAME: string;
  DIGEST: string;
  DIGEST_TEXT: string;
  TABLE_NAMES: string;
  INDEX_NAMES: string | null;
  SAMPLE_USER: string;
  // Execution counters
  EXEC_COUNT: number;
  SUM_ERRORS: number;
  SUM_WARNINGS: number;
  // Latency aggregates
  SUM_LATENCY: number;
  MAX_LATENCY: number;
  MIN_LATENCY: number;
  AVG_LATENCY: number;
  AVG_PARSE_LATENCY: number;
  MAX_PARSE_LATENCY: number;
  AVG_COMPILE_LATENCY: number;
  MAX_COMPILE_LATENCY: number;
  // Coprocessor task statistics
  SUM_COP_TASK_NUM: number;
  MAX_COP_PROCESS_TIME: number;
  MAX_COP_PROCESS_ADDRESS: string;
  MAX_COP_WAIT_TIME: number;
  MAX_COP_WAIT_ADDRESS: string;
  AVG_PROCESS_TIME: number;
  MAX_PROCESS_TIME: number;
  AVG_WAIT_TIME: number;
  MAX_WAIT_TIME: number;
  AVG_BACKOFF_TIME: number;
  MAX_BACKOFF_TIME: number;
  // Key scanning statistics
  AVG_TOTAL_KEYS: number;
  MAX_TOTAL_KEYS: number;
  AVG_PROCESSED_KEYS: number;
  MAX_PROCESSED_KEYS: number;
  // RocksDB-level counters
  AVG_ROCKSDB_DELETE_SKIPPED_COUNT: number;
  MAX_ROCKSDB_DELETE_SKIPPED_COUNT: number;
  AVG_ROCKSDB_KEY_SKIPPED_COUNT: number;
  MAX_ROCKSDB_KEY_SKIPPED_COUNT: number;
  AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
  MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
  AVG_ROCKSDB_BLOCK_READ_COUNT: number;
  MAX_ROCKSDB_BLOCK_READ_COUNT: number;
  AVG_ROCKSDB_BLOCK_READ_BYTE: number;
  MAX_ROCKSDB_BLOCK_READ_BYTE: number;
  // Transaction commit pipeline timings
  AVG_PREWRITE_TIME: number;
  MAX_PREWRITE_TIME: number;
  AVG_COMMIT_TIME: number;
  MAX_COMMIT_TIME: number;
  AVG_GET_COMMIT_TS_TIME: number;
  MAX_GET_COMMIT_TS_TIME: number;
  AVG_COMMIT_BACKOFF_TIME: number;
  MAX_COMMIT_BACKOFF_TIME: number;
  AVG_RESOLVE_LOCK_TIME: number;
  MAX_RESOLVE_LOCK_TIME: number;
  AVG_LOCAL_LATCH_WAIT_TIME: number;
  MAX_LOCAL_LATCH_WAIT_TIME: number;
  // Write statistics
  AVG_WRITE_KEYS: number;
  MAX_WRITE_KEYS: number;
  AVG_WRITE_SIZE: number;
  MAX_WRITE_SIZE: number;
  AVG_PREWRITE_REGIONS: number;
  MAX_PREWRITE_REGIONS: number;
  // Retry / backoff statistics
  AVG_TXN_RETRY: number;
  MAX_TXN_RETRY: number;
  SUM_EXEC_RETRY: number;
  SUM_EXEC_RETRY_TIME: number;
  SUM_BACKOFF_TIMES: number;
  BACKOFF_TYPES: string | null;
  // Memory / disk usage
  AVG_MEM: number;
  MAX_MEM: number;
  AVG_DISK: number;
  MAX_DISK: number;
  // Component-level timings
  AVG_KV_TIME: number;
  AVG_PD_TIME: number;
  AVG_BACKOFF_TOTAL_TIME: number;
  AVG_WRITE_SQL_RESP_TIME: number;
  AVG_TIDB_CPU_TIME: number;
  AVG_TIKV_CPU_TIME: number;
  // Result set statistics
  MAX_RESULT_ROWS: number;
  MIN_RESULT_ROWS: number;
  AVG_RESULT_ROWS: number;
  PREPARED: number;
  AVG_AFFECTED_ROWS: number;
  FIRST_SEEN: string;
  LAST_SEEN: string;
  // Plan cache / plan text (PLAN is parsed by decodedPlan into parsedPlan)
  PLAN_IN_CACHE: number;
  PLAN_CACHE_HITS: number;
  PLAN_IN_BINDING: number;
  QUERY_SAMPLE_TEXT: string;
  PREV_SAMPLE_TEXT: string;
  PLAN_DIGEST: string;
  PLAN: string;
  BINARY_PLAN: string;
  CHARSET: string;
  COLLATION: string;
  PLAN_HINT: string;
  // Resource-control statistics
  MAX_REQUEST_UNIT_READ: number;
  AVG_REQUEST_UNIT_READ: number;
  MAX_REQUEST_UNIT_WRITE: number;
  AVG_REQUEST_UNIT_WRITE: number;
  MAX_QUEUED_RC_TIME: number;
  AVG_QUEUED_RC_TIME: number;
  RESOURCE_GROUP: string;
  PLAN_CACHE_UNQUALIFIED: number;
  PLAN_CACHE_UNQUALIFIED_LAST_REASON: string;
}
185

186
/**
187
 * Class implementing SQL analysis operations for ForgeSQL ORM.
188
 * Provides methods for analyzing query performance, execution plans, and slow queries.
189
 */
190
export class ForgeSQLAnalizeOperation implements SchemaAnalyzeForgeSql {
2✔
191
  private readonly forgeOperations: ForgeSqlOperation;
32✔
192

193
  /**
194
   * Creates a new instance of ForgeSQLAnalizeOperation.
195
   * @param {ForgeSqlOperation} forgeOperations - The ForgeSQL operations instance
196
   */
197
  constructor(forgeOperations: ForgeSqlOperation) {
32✔
198
    this.forgeOperations = forgeOperations;
32✔
199
    this.mapToCamelCaseClusterStatement = this.mapToCamelCaseClusterStatement.bind(this);
32✔
200
  }
32✔
201

202
  /**
203
   * Executes EXPLAIN ANALYZE on a raw SQL query.
204
   * @param {string} query - The SQL query to analyze
205
   * @param {unknown[]} bindParams - The query parameters
206
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
207
   */
208
  async explainAnalyzeRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
32✔
209
    const results = await this.forgeOperations
6✔
210
      .fetch()
6✔
211
      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ANALYZE ${query}`, bindParams as SqlParameters);
6✔
212
    return results.map((row) => ({
6✔
213
      id: row.id,
4✔
214
      estRows: row.estRows,
4✔
215
      actRows: row.actRows,
4✔
216
      task: row.task,
4✔
217
      accessObject: row["access object"],
4✔
218
      executionInfo: row["execution info"],
4✔
219
      operatorInfo: row["operator info"],
4✔
220
      memory: row.memory,
4✔
221
      disk: row.disk,
4✔
222
    }));
6✔
223
  }
6✔
224

225
  /**
226
   * Executes EXPLAIN ANALYZE on a Drizzle query.
227
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
228
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
229
   */
230
  async explainAnalyze(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
32✔
231
    const { sql, params } = query.toSQL();
2✔
232
    return this.explainAnalyzeRaw(sql, params);
2✔
233
  }
2✔
234

235
  /**
236
   * Decodes a query execution plan from its string representation.
237
   * @param {string} input - The raw execution plan string
238
   * @returns {ExplainAnalyzeRow[]} The decoded execution plan rows
239
   */
240
  decodedPlan(input: string): ExplainAnalyzeRow[] {
32✔
241
    if (!input) {
18✔
242
      return [];
6✔
243
    }
6✔
244
    const lines = input.trim().split("\n");
12✔
245
    if (lines.length < 2) return [];
18✔
246

247
    const headersRaw = lines[0]
2✔
248
      .split("\t")
2✔
249
      .map((h) => h.trim())
2✔
250
      .filter(Boolean);
2✔
251
    const headers = headersRaw.map((h) => {
2✔
252
      return h
18✔
253
        .replace(/\s+/g, " ")
18✔
254
        .replace(/[-\s]+(.)?/g, (_, c) => (c ? c.toUpperCase() : ""))
18!
255
        .replace(/^./, (s) => s.toLowerCase());
18✔
256
    });
2✔
257

258
    return lines.slice(1).map((line) => {
2✔
259
      const values = line
2✔
260
        .split("\t")
2✔
261
        .map((s) => s.trim())
2✔
262
        .filter(Boolean);
2✔
263
      const row: Record<string, string> = {};
2✔
264
      headers.forEach((key, i) => {
2✔
265
        row[key] = values[i] ?? "";
18!
266
      });
2✔
267
      return row as unknown as ExplainAnalyzeRow;
2✔
268
    });
2✔
269
  }
18✔
270

271
  /**
272
   * Normalizes a raw slow query row into a more structured format.
273
   * @param {SlowQueryRaw} row - The raw slow query data
274
   * @returns {SlowQueryNormalized} The normalized slow query data
275
   */
276
  normalizeSlowQuery(row: SlowQueryRaw): SlowQueryNormalized {
32✔
277
    return {
4✔
278
      time: row.Time,
4✔
279
      txnStartTs: row.Txn_start_ts,
4✔
280
      user: row.User,
4✔
281
      host: row.Host,
4✔
282
      connId: row.Conn_ID,
4✔
283
      db: row.DB,
4✔
284
      query: row.Query,
4✔
285
      digest: row.Digest,
4✔
286
      queryTime: row.Query_time,
4✔
287
      compileTime: row.Compile_time,
4✔
288
      optimizeTime: row.Optimize_time,
4✔
289
      processTime: row.Process_time,
4✔
290
      waitTime: row.Wait_time,
4✔
291
      parseTime: row.Parse_time,
4✔
292
      rewriteTime: row.Rewrite_time,
4✔
293
      copTime: row.Cop_time,
4✔
294
      copProcAvg: row.Cop_proc_avg,
4✔
295
      copProcMax: row.Cop_proc_max,
4✔
296
      copProcP90: row.Cop_proc_p90,
4✔
297
      copProcAddr: row.Cop_proc_addr,
4✔
298
      copWaitAvg: row.Cop_wait_avg,
4✔
299
      copWaitMax: row.Cop_wait_max,
4✔
300
      copWaitP90: row.Cop_wait_p90,
4✔
301
      copWaitAddr: row.Cop_wait_addr,
4✔
302
      memMax: row.Mem_max,
4✔
303
      diskMax: row.Disk_max,
4✔
304
      totalKeys: row.Total_keys,
4✔
305
      processKeys: row.Process_keys,
4✔
306
      requestCount: row.Request_count,
4✔
307
      kvTotal: row.KV_total,
4✔
308
      pdTotal: row.PD_total,
4✔
309
      resultRows: row.Result_rows,
4✔
310
      rocksdbBlockCacheHitCount: row.Rocksdb_block_cache_hit_count,
4✔
311
      rocksdbBlockReadCount: row.Rocksdb_block_read_count,
4✔
312
      rocksdbBlockReadByte: row.Rocksdb_block_read_byte,
4✔
313
      plan: row.Plan,
4✔
314
      binaryPlan: row.Binary_plan,
4✔
315
      planDigest: row.Plan_digest,
4✔
316
      parsedPlan: this.decodedPlan(row.Plan),
4✔
317
    };
4✔
318
  }
4✔
319

320
  /**
321
   * Builds a SQL query for retrieving cluster statement history.
322
   * @param {string[]} tables - The tables to analyze
323
   * @param {Date} [from] - The start date for the analysis
324
   * @param {Date} [to] - The end date for the analysis
325
   * @returns {string} The SQL query for cluster statement history
326
   */
327
  buildClusterStatementQuery(tables: string[], from?: Date, to?: Date): string {
32✔
328
    const formatDateTime = (date: Date): string => moment(date).format("YYYY-MM-DDTHH:mm:ss.SSS");
8✔
329

330
    const tableConditions = tables
8✔
331
      .map((table) => `TABLE_NAMES = CONCAT(SCHEMA_NAME, '.', '${table}')`)
8✔
332
      .join(" OR ");
8✔
333

334
    const timeConditions: string[] = [];
8✔
335
    if (from) {
8✔
336
      timeConditions.push(`SUMMARY_BEGIN_TIME >= '${formatDateTime(from)}'`);
4✔
337
    }
4✔
338
    if (to) {
8✔
339
      timeConditions.push(`SUMMARY_END_TIME <= '${formatDateTime(to)}'`);
4✔
340
    }
4✔
341

342
    const whereClauses = [`(${tableConditions})`, ...timeConditions];
8✔
343

344
    return `
8✔
345
      SELECT *
346
      FROM (
347
        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY
348
        UNION ALL
349
        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
350
      ) AS combined
351
      WHERE ${whereClauses.join(" AND ")}
8✔
352
    `;
353
  }
8✔
354

355
  /**
356
   * Retrieves and analyzes slow queries from the database.
357
   * @returns {Promise<SlowQueryNormalized[]>} The normalized slow query data
358
   */
359
  async analyzeSlowQueries(): Promise<SlowQueryNormalized[]> {
32✔
360
    const results = await this.forgeOperations.fetch().executeRawSQL<SlowQueryRaw>(`
2✔
361
      SELECT *
362
      FROM information_schema.slow_query
363
      ORDER BY time DESC
364
    `);
2✔
365
    return results.map((row) => this.normalizeSlowQuery(row));
2✔
366
  }
2✔
367

368
  /**
369
   * Converts a cluster statement row to camelCase format.
370
   * @param {Record<string, any>} input - The input row data
371
   * @returns {ClusterStatementRowCamelCase} The converted row data
372
   */
373
  mapToCamelCaseClusterStatement(input: Record<string, any>): ClusterStatementRowCamelCase {
32✔
374
    if (!input) {
8!
NEW
375
      return {} as ClusterStatementRowCamelCase;
×
NEW
376
    }
×
377

378
    const result: any = {};
8✔
379
    result.parsedPlan = this.decodedPlan(input["PLAN"] ?? "");
8✔
380
    for (const key in input) {
8✔
381
      const camelKey = key.toLowerCase().replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
18✔
382
      result[camelKey] = input[key];
18✔
383
    }
18✔
384

385
    return result as ClusterStatementRowCamelCase;
8✔
386
  }
8✔
387

388
  /**
389
   * Analyzes query history for specific tables using raw table names.
390
   * @param {string[]} tables - The table names to analyze
391
   * @param {Date} [fromDate] - The start date for the analysis
392
   * @param {Date} [toDate] - The end date for the analysis
393
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
394
   */
395
  async analyzeQueriesHistoryRaw(
32✔
396
    tables: string[],
4✔
397
    fromDate?: Date,
4✔
398
    toDate?: Date,
4✔
399
  ): Promise<ClusterStatementRowCamelCase[]> {
4✔
400
    const results = await this.forgeOperations
4✔
401
      .fetch()
4✔
402
      .executeRawSQL<ClusterStatementRow>(
4✔
403
        this.buildClusterStatementQuery(tables, fromDate, toDate),
4✔
404
      );
4✔
405
    return results.map((r) => this.mapToCamelCaseClusterStatement(r));
4✔
406
  }
4✔
407

408
  /**
409
   * Analyzes query history for specific tables using Drizzle table objects.
410
   * @param {AnyMySqlTable[]} tables - The Drizzle table objects to analyze
411
   * @param {Date} [fromDate] - The start date for the analysis
412
   * @param {Date} [toDate] - The end date for the analysis
413
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
414
   */
415
  async analyzeQueriesHistory(
32✔
416
    tables: AnyMySqlTable[],
2✔
417
    fromDate?: Date,
2✔
418
    toDate?: Date,
2✔
419
  ): Promise<ClusterStatementRowCamelCase[]> {
2✔
420
    return this.analyzeQueriesHistoryRaw(
2✔
421
      tables.map((table) => getTableName(table)),
2✔
422
      fromDate,
2✔
423
      toDate,
2✔
424
    );
2✔
425
  }
2✔
426
}
32✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc