• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

nshkrdotcom / ElixirScope / 83134b8f55cc7142f7a918aeb2e46c6944bc57c6

28 May 2025 03:15PM UTC coverage: 60.013% (-0.6%) from 60.612%
83134b8f55cc7142f7a918aeb2e46c6944bc57c6

push

github

NSHkr
Performance Optimization & Memory Management 1072 tests, 0 failures, 76 excluded

189 of 445 new or added lines in 2 files covered. (42.47%)

2 existing lines in 2 files now uncovered.

5679 of 9463 relevant lines covered (60.01%)

3649.78 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/lib/elixir_scope/ast_repository/performance_optimizer.ex
1
defmodule ElixirScope.ASTRepository.PerformanceOptimizer do
  @moduledoc """
  Performance optimization module for the Enhanced AST Repository.

  Provides intelligent caching, batch operations, lazy loading, and
  integration with the MemoryManager for optimal performance.

  ## Features

  - **Smart Caching**: Multi-level caching with TTL and LRU eviction
  - **Batch Operations**: Bulk storage and retrieval optimizations
  - **Lazy Loading**: On-demand analysis generation
  - **Memory Integration**: Seamless MemoryManager integration
  - **Query Optimization**: Intelligent query result caching
  - **ETS Optimization**: Optimized table structures and indexes

  ## Performance Targets

  - Module storage: <5ms per module (optimized from 10ms)
  - Query response: <50ms for 95th percentile (optimized from 100ms)
  - Cache hit ratio: >85% for repeated operations
  - Memory efficiency: 30% reduction in memory usage
  """

  use GenServer
  require Logger

  alias ElixirScope.ASTRepository.{MemoryManager, EnhancedRepository}
  alias ElixirScope.ASTRepository.Enhanced.{EnhancedModuleData, EnhancedFunctionData}

  # Performance optimization configuration
  @batch_size 50
  @lazy_loading_threshold 1000  # bytes
  @cache_warming_interval 300_000  # 5 minutes
  @optimization_interval 600_000   # 10 minutes

  # Named ETS table used for access-pattern tracking. Client processes read and
  # write it directly (see track_access/2, get_access_count/1), so the server
  # must ensure it exists and is public before any client call arrives.
  @access_tracking_table :ast_repo_access_tracking

  # Cache keys for different data types
  @module_cache_prefix "module:"
  @function_cache_prefix "function:"
  @analysis_cache_prefix "analysis:"
  @query_cache_prefix "query:"

  defstruct [
    :optimization_stats,
    :cache_stats,
    :batch_stats,
    :lazy_loading_stats,
    :enabled
  ]

  @type optimization_stats :: %{
    modules_optimized: non_neg_integer(),
    functions_optimized: non_neg_integer(),
    cache_optimizations: non_neg_integer(),
    memory_optimizations: non_neg_integer(),
    query_optimizations: non_neg_integer(),
    total_time_saved_ms: non_neg_integer()
  }

  # GenServer API

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: __MODULE__)
  end

  @impl true
  def init(opts) do
    # The access-tracking table is consulted by client-side functions such as
    # get_module_optimized/1; without it every :ets.lookup/2 raises
    # ArgumentError. Create it once, tolerating an already-existing table
    # (e.g. left over from a previous incarnation of this server).
    if :ets.whereis(@access_tracking_table) == :undefined do
      :ets.new(@access_tracking_table, [
        :named_table,
        :public,
        :set,
        read_concurrency: true,
        write_concurrency: true
      ])
    end

    # Schedule optimization tasks
    schedule_cache_warming()
    schedule_optimization_cycle()

    state = %__MODULE__{
      optimization_stats: init_optimization_stats(),
      cache_stats: init_cache_stats(),
      batch_stats: init_batch_stats(),
      lazy_loading_stats: init_lazy_loading_stats(),
      enabled: Keyword.get(opts, :enabled, true)
    }

    Logger.info("PerformanceOptimizer started with optimizations enabled: #{state.enabled}")
    {:ok, state}
  end

  # Public API

  @doc """
  Optimizes module storage with intelligent caching and batching.

  ## Options

  - `:batch_mode` - Enable batch processing (default: false)
  - `:lazy_analysis` - Enable lazy analysis generation (default: true)
  - `:cache_priority` - Cache priority level (:high, :normal, :low) (default: :normal)

  ## Returns

  - `{:ok, enhanced_data}` - Module stored and optimized
  - `{:error, reason}` - Storage failed
  """
  @spec store_module_optimized(atom(), term(), keyword()) :: {:ok, EnhancedModuleData.t()} | {:error, term()}
  def store_module_optimized(module_name, ast, opts \\ []) do
    GenServer.call(__MODULE__, {:store_module_optimized, module_name, ast, opts})
  end

  @doc """
  Optimizes function storage with CFG/DFG lazy loading.
  """
  @spec store_function_optimized(atom(), atom(), non_neg_integer(), term(), keyword()) ::
    {:ok, EnhancedFunctionData.t()} | {:error, term()}
  def store_function_optimized(module_name, function_name, arity, ast, opts \\ []) do
    GenServer.call(__MODULE__, {:store_function_optimized, module_name, function_name, arity, ast, opts})
  end

  @doc """
  Performs batch storage operations for multiple modules.
  """
  @spec store_modules_batch([{atom(), term()}], keyword()) :: {:ok, [EnhancedModuleData.t()]} | {:error, term()}
  def store_modules_batch(modules, opts \\ []) do
    # Batches can be large; allow a longer call timeout than the 5s default.
    GenServer.call(__MODULE__, {:store_modules_batch, modules, opts}, 60_000)
  end

  @doc """
  Retrieves module with intelligent caching.

  Runs in the caller's process: checks the query cache first, falling back to
  the repository and populating the cache on a miss.
  """
  @spec get_module_optimized(atom()) :: {:ok, EnhancedModuleData.t()} | {:error, term()}
  def get_module_optimized(module_name) do
    cache_key = @module_cache_prefix <> to_string(module_name)

    case MemoryManager.cache_get(:query, cache_key) do
      {:ok, cached_data} ->
        # Cache hit - update access tracking
        track_access(module_name, :cache_hit)
        {:ok, cached_data}

      :miss ->
        # Cache miss - fetch from repository
        case EnhancedRepository.get_enhanced_module(module_name) do
          {:ok, module_data} ->
            # Cache the result
            MemoryManager.cache_put(:query, cache_key, module_data)
            track_access(module_name, :cache_miss)
            {:ok, module_data}

          error ->
            error
        end
    end
  end

  @doc """
  Retrieves function with lazy analysis loading.
  """
  @spec get_function_optimized(atom(), atom(), non_neg_integer()) :: {:ok, EnhancedFunctionData.t()} | {:error, term()}
  def get_function_optimized(module_name, function_name, arity) do
    cache_key = @function_cache_prefix <> "#{module_name}.#{function_name}/#{arity}"

    case MemoryManager.cache_get(:analysis, cache_key) do
      {:ok, cached_data} ->
        track_access({module_name, function_name, arity}, :cache_hit)
        {:ok, cached_data}

      :miss ->
        case EnhancedRepository.get_enhanced_function(module_name, function_name, arity) do
          {:ok, function_data} ->
            # Apply lazy loading optimizations
            optimized_data = apply_lazy_loading(function_data)
            MemoryManager.cache_put(:analysis, cache_key, optimized_data)
            track_access({module_name, function_name, arity}, :cache_miss)
            {:ok, optimized_data}

          error ->
            error
        end
    end
  end

  @doc """
  Performs optimized analysis queries with result caching.
  """
  @spec query_analysis_optimized(atom(), map()) :: {:ok, term()} | {:error, term()}
  def query_analysis_optimized(query_type, params) do
    GenServer.call(__MODULE__, {:query_analysis_optimized, query_type, params})
  end

  @doc """
  Warms up caches with frequently accessed data.
  """
  @spec warm_caches() :: :ok
  def warm_caches() do
    GenServer.cast(__MODULE__, :warm_caches)
  end

  @doc """
  Optimizes ETS table structures and indexes.
  """
  @spec optimize_ets_tables() :: :ok
  def optimize_ets_tables() do
    GenServer.cast(__MODULE__, :optimize_ets_tables)
  end

  @doc """
  Gets comprehensive optimization statistics.
  """
  @spec get_optimization_stats() :: {:ok, map()}
  def get_optimization_stats() do
    GenServer.call(__MODULE__, :get_optimization_stats)
  end

  @doc """
  Enables or disables performance optimizations.
  """
  @spec set_optimization_enabled(boolean()) :: :ok
  def set_optimization_enabled(enabled) do
    GenServer.call(__MODULE__, {:set_optimization_enabled, enabled})
  end

  # GenServer Callbacks

  @impl true
  def handle_call({:store_module_optimized, module_name, ast, opts}, _from, state) do
    if state.enabled do
      start_time = System.monotonic_time(:microsecond)

      # Boundary rescue: storage failures must not crash the optimizer; the
      # caller receives a tagged error instead.
      try do
        # Check if lazy analysis is enabled
        lazy_analysis = Keyword.get(opts, :lazy_analysis, true)
        batch_mode = Keyword.get(opts, :batch_mode, false)

        result = if batch_mode do
          # Store in batch queue for later processing
          queue_for_batch_processing(module_name, ast, opts)
          {:ok, :queued_for_batch}
        else
          # Store immediately with optimizations
          store_module_with_optimizations(module_name, ast, lazy_analysis)
        end

        # Update statistics
        end_time = System.monotonic_time(:microsecond)
        duration = end_time - start_time

        new_stats = update_optimization_stats(state.optimization_stats, :module_storage, duration)
        new_state = %{state | optimization_stats: new_stats}

        {:reply, result, new_state}
      rescue
        error ->
          Logger.error("Optimized module storage failed: #{inspect(error)}")
          {:reply, {:error, {:optimization_failed, error}}, state}
      end
    else
      # Fall back to standard storage
      result = EnhancedRepository.store_enhanced_module(module_name, ast, opts)
      {:reply, result, state}
    end
  end

  def handle_call({:store_function_optimized, module_name, function_name, arity, ast, opts}, _from, state) do
    if state.enabled do
      start_time = System.monotonic_time(:microsecond)

      try do
        # Apply function-specific optimizations
        result = store_function_with_optimizations(module_name, function_name, arity, ast, opts)

        # Update statistics
        end_time = System.monotonic_time(:microsecond)
        duration = end_time - start_time

        new_stats = update_optimization_stats(state.optimization_stats, :function_storage, duration)
        new_state = %{state | optimization_stats: new_stats}

        {:reply, result, new_state}
      rescue
        error ->
          Logger.error("Optimized function storage failed: #{inspect(error)}")
          {:reply, {:error, {:optimization_failed, error}}, state}
      end
    else
      # Fall back to standard storage
      result = EnhancedRepository.store_enhanced_function(module_name, function_name, arity, ast, opts)
      {:reply, result, state}
    end
  end

  def handle_call({:store_modules_batch, modules, opts}, _from, state) do
    if state.enabled do
      start_time = System.monotonic_time(:microsecond)

      try do
        # Process modules in optimized batches
        results = process_modules_in_batches(modules, opts)

        # Update batch statistics
        end_time = System.monotonic_time(:microsecond)
        duration = end_time - start_time

        new_batch_stats = update_batch_stats(state.batch_stats, length(modules), duration)
        new_state = %{state | batch_stats: new_batch_stats}

        {:reply, {:ok, results}, new_state}
      rescue
        error ->
          Logger.error("Batch storage failed: #{inspect(error)}")
          {:reply, {:error, {:batch_failed, error}}, state}
      end
    else
      # Fall back to individual storage; failed modules are dropped from the
      # result list (best-effort semantics preserved from the optimized path).
      results = Enum.map(modules, fn {module_name, ast} ->
        case EnhancedRepository.store_enhanced_module(module_name, ast, opts) do
          {:ok, data} -> data
          {:error, _} -> nil
        end
      end)

      {:reply, {:ok, Enum.filter(results, & &1)}, state}
    end
  end

  def handle_call({:query_analysis_optimized, query_type, params}, _from, state) do
    if state.enabled do
      start_time = System.monotonic_time(:microsecond)

      # Generate cache key for query
      cache_key = generate_query_cache_key(query_type, params)

      result = case MemoryManager.cache_get(:query, cache_key) do
        {:ok, cached_result} ->
          # Cache hit
          {:ok, cached_result}

        :miss ->
          # Cache miss - perform query and cache result
          case EnhancedRepository.query_analysis(query_type, params) do
            {:ok, query_result} ->
              # Cache the result with appropriate TTL
              MemoryManager.cache_put(:query, cache_key, query_result)
              {:ok, query_result}

            error ->
              error
          end
      end

      # Update query statistics
      end_time = System.monotonic_time(:microsecond)
      duration = end_time - start_time

      new_stats = update_optimization_stats(state.optimization_stats, :query_optimization, duration)
      new_state = %{state | optimization_stats: new_stats}

      {:reply, result, new_state}
    else
      # Fall back to standard query
      result = EnhancedRepository.query_analysis(query_type, params)
      {:reply, result, state}
    end
  end

  def handle_call(:get_optimization_stats, _from, state) do
    stats = %{
      optimization: state.optimization_stats,
      cache: state.cache_stats,
      batch: state.batch_stats,
      lazy_loading: state.lazy_loading_stats,
      enabled: state.enabled
    }

    {:reply, {:ok, stats}, state}
  end

  def handle_call({:set_optimization_enabled, enabled}, _from, state) do
    new_state = %{state | enabled: enabled}
    Logger.info("Performance optimizations #{if enabled, do: "enabled", else: "disabled"}")
    {:reply, :ok, new_state}
  end

  @impl true
  def handle_cast(:warm_caches, state) do
    if state.enabled do
      perform_cache_warming()
    end
    {:noreply, state}
  end

  def handle_cast(:optimize_ets_tables, state) do
    if state.enabled do
      perform_ets_optimization()
    end
    {:noreply, state}
  end

  @impl true
  def handle_info(:cache_warming, state) do
    if state.enabled do
      perform_cache_warming()
    end

    schedule_cache_warming()
    {:noreply, state}
  end

  def handle_info(:optimization_cycle, state) do
    if state.enabled do
      perform_optimization_cycle()
    end

    schedule_optimization_cycle()
    {:noreply, state}
  end

  # Catch-all: defining our own handle_info/2 clauses overrides the default
  # injected by `use GenServer`, so without this clause any stray message
  # (late Task result, monitor DOWN, etc.) would crash the server.
  def handle_info(msg, state) do
    Logger.debug("PerformanceOptimizer ignoring unexpected message: #{inspect(msg)}")
    {:noreply, state}
  end

  # Private Implementation

  defp store_module_with_optimizations(module_name, ast, lazy_analysis) do
    # Pre-process AST for optimization
    optimized_ast = preprocess_ast_for_storage(ast)

    # Determine what analysis to perform immediately vs lazily
    immediate_analysis = if lazy_analysis do
      [:basic_metrics, :dependencies]
    else
      [:all]
    end

    # Store with selective analysis
    opts = [analysis_level: immediate_analysis, optimized: true]
    EnhancedRepository.store_enhanced_module(module_name, optimized_ast, opts)
  end

  defp store_function_with_optimizations(module_name, function_name, arity, ast, opts) do
    # Check if function is large enough to warrant lazy loading
    ast_size = estimate_ast_size(ast)

    if ast_size > @lazy_loading_threshold do
      # Store with lazy analysis
      lazy_opts = Keyword.put(opts, :lazy_analysis, true)
      EnhancedRepository.store_enhanced_function(module_name, function_name, arity, ast, lazy_opts)
    else
      # Store with full analysis
      EnhancedRepository.store_enhanced_function(module_name, function_name, arity, ast, opts)
    end
  end

  defp process_modules_in_batches(modules, _opts) do
    modules
    |> Enum.chunk_every(@batch_size)
    |> Enum.flat_map(fn batch ->
      # Process batch concurrently
      tasks = Enum.map(batch, fn {module_name, ast} ->
        Task.async(fn ->
          case store_module_with_optimizations(module_name, ast, true) do
            {:ok, data} -> data
            {:error, _} -> nil
          end
        end)
      end)

      # Collect results with timeout; failed stores (nil) are filtered out
      Task.await_many(tasks, 30_000)
      |> Enum.filter(& &1)
    end)
  end

  defp apply_lazy_loading(function_data) do
    # Check if expensive analysis data should be loaded lazily. At most one of
    # CFG/DFG is loaded per call; the first matching branch wins.
    cond do
      is_nil(function_data.cfg_data) and should_load_cfg?(function_data) ->
        # Load CFG on demand
        case EnhancedRepository.get_cfg(function_data.module_name, function_data.function_name, function_data.arity) do
          {:ok, cfg} -> %{function_data | cfg_data: cfg}
          _ -> function_data
        end

      is_nil(function_data.dfg_data) and should_load_dfg?(function_data) ->
        # Load DFG on demand
        case EnhancedRepository.get_dfg(function_data.module_name, function_data.function_name, function_data.arity) do
          {:ok, dfg} -> %{function_data | dfg_data: dfg}
          _ -> function_data
        end

      true ->
        function_data
    end
  end

  defp should_load_cfg?(function_data) do
    # Load CFG if function is complex or frequently accessed
    complexity = get_function_complexity(function_data)
    access_count = get_access_count({function_data.module_name, function_data.function_name, function_data.arity})

    complexity > 5 or access_count > 10
  end

  defp should_load_dfg?(function_data) do
    # Load DFG for functions with data flow analysis needs
    has_variables = function_has_variables?(function_data.ast)
    access_count = get_access_count({function_data.module_name, function_data.function_name, function_data.arity})

    has_variables and access_count > 5
  end

  defp perform_cache_warming() do
    Logger.debug("Performing cache warming")

    # Warm up frequently accessed modules
    frequently_accessed_modules = get_frequently_accessed_modules()

    Enum.each(frequently_accessed_modules, fn module_name ->
      cache_key = @module_cache_prefix <> to_string(module_name)

      case MemoryManager.cache_get(:query, cache_key) do
        :miss ->
          # Pre-load into cache
          case EnhancedRepository.get_enhanced_module(module_name) do
            {:ok, module_data} ->
              MemoryManager.cache_put(:query, cache_key, module_data)
            _ ->
              :ok
          end
        _ ->
          :ok
      end
    end)
  end

  defp perform_ets_optimization() do
    Logger.debug("Performing ETS optimization")

    # Optimize table structures based on access patterns
    # This could include reordering data, compacting tables, etc.

    # For now, just ensure tables are properly configured
    :ok
  end

  defp perform_optimization_cycle() do
    Logger.debug("Performing optimization cycle")

    # Trigger memory cleanup if needed
    {:ok, memory_stats} = MemoryManager.monitor_memory_usage()

    if memory_stats.memory_usage_percent > 70 do
      MemoryManager.cleanup_unused_data(max_age: 3600)
    end

    # Compress old analysis data
    if memory_stats.memory_usage_percent > 60 do
      MemoryManager.compress_old_analysis(access_threshold: 5, age_threshold: 1800)
    end
  end

  defp track_access(identifier, access_type) do
    # Track access patterns for optimization. Only cache hits bump the count;
    # the first access (a miss) seeds the entry at 1.
    current_time = System.monotonic_time(:second)

    case :ets.lookup(@access_tracking_table, identifier) do
      [{^identifier, _last_access, access_count}] ->
        new_count = if access_type == :cache_hit, do: access_count + 1, else: access_count
        :ets.insert(@access_tracking_table, {identifier, current_time, new_count})

      [] ->
        :ets.insert(@access_tracking_table, {identifier, current_time, 1})
    end
  end

  defp generate_query_cache_key(query_type, params) do
    # Generate deterministic cache key for query (MD5 is fine here: the hash
    # is a cache key, not a security boundary)
    param_hash = :crypto.hash(:md5, :erlang.term_to_binary(params))
    |> Base.encode16(case: :lower)

    @query_cache_prefix <> "#{query_type}:#{param_hash}"
  end

  defp preprocess_ast_for_storage(ast) do
    # Optimize AST structure for storage
    # This could include removing unnecessary metadata, normalizing structures, etc.
    ast
  end

  defp estimate_ast_size(ast) do
    # Estimate AST size in bytes
    :erlang.external_size(ast)
  end

  defp get_function_complexity(function_data) do
    # Extract complexity from function data; default to 1 when metrics are absent
    case function_data.complexity_metrics do
      %{cyclomatic_complexity: complexity} -> complexity
      _ -> 1
    end
  end

  defp function_has_variables?(ast) do
    # Check if function AST contains variable operations
    # Simplified check - in practice would traverse AST
    is_tuple(ast) and tuple_size(ast) > 0
  end

  defp get_access_count(identifier) do
    case :ets.lookup(@access_tracking_table, identifier) do
      [{^identifier, _last_access, access_count}] -> access_count
      [] -> 0
    end
  end

  defp get_frequently_accessed_modules() do
    # Get modules with high access counts. Module identifiers are bare atoms;
    # function identifiers are {mod, fun, arity} tuples and are filtered out.
    :ets.tab2list(@access_tracking_table)
    |> Enum.filter(fn {identifier, _time, count} ->
      is_atom(identifier) and count > 10
    end)
    |> Enum.map(fn {module, _time, _count} -> module end)
    |> Enum.take(20)  # Top 20 most accessed
  end

  defp queue_for_batch_processing(_module_name, _ast, _opts) do
    # In a real implementation, this would queue items for batch processing
    :ok
  end

  defp init_optimization_stats() do
    %{
      modules_optimized: 0,
      functions_optimized: 0,
      cache_optimizations: 0,
      memory_optimizations: 0,
      query_optimizations: 0,
      total_time_saved_ms: 0
    }
  end

  defp init_cache_stats() do
    %{
      cache_hits: 0,
      cache_misses: 0,
      cache_evictions: 0,
      cache_warming_cycles: 0
    }
  end

  defp init_batch_stats() do
    %{
      batches_processed: 0,
      total_items_batched: 0,
      average_batch_time_ms: 0,
      batch_efficiency_ratio: 0.0
    }
  end

  defp init_lazy_loading_stats() do
    %{
      lazy_loads_triggered: 0,
      lazy_loads_avoided: 0,
      memory_saved_bytes: 0,
      time_saved_ms: 0
    }
  end

  defp update_optimization_stats(stats, operation_type, duration_us) do
    # Integer division keeps total_time_saved_ms consistent with its
    # non_neg_integer() typespec (float `/` would poison the accumulator).
    duration_ms = div(duration_us, 1000)

    case operation_type do
      :module_storage ->
        %{stats |
          modules_optimized: stats.modules_optimized + 1,
          total_time_saved_ms: stats.total_time_saved_ms + max(0, 10 - duration_ms)
        }

      :function_storage ->
        %{stats |
          functions_optimized: stats.functions_optimized + 1,
          total_time_saved_ms: stats.total_time_saved_ms + max(0, 20 - duration_ms)
        }

      :query_optimization ->
        %{stats |
          query_optimizations: stats.query_optimizations + 1,
          total_time_saved_ms: stats.total_time_saved_ms + max(0, 100 - duration_ms)
        }

      _ ->
        stats
    end
  end

  defp update_batch_stats(stats, item_count, duration_us) do
    duration_ms = duration_us / 1000
    new_total_items = stats.total_items_batched + item_count
    new_batch_count = stats.batches_processed + 1

    # Running average over all batches processed so far
    new_average = if new_batch_count > 0 do
      ((stats.average_batch_time_ms * stats.batches_processed) + duration_ms) / new_batch_count
    else
      duration_ms
    end

    # Calculate efficiency ratio (items per ms)
    efficiency = if duration_ms > 0, do: item_count / duration_ms, else: 0.0

    %{stats |
      batches_processed: new_batch_count,
      total_items_batched: new_total_items,
      average_batch_time_ms: new_average,
      batch_efficiency_ratio: efficiency
    }
  end

  defp schedule_cache_warming() do
    Process.send_after(self(), :cache_warming, @cache_warming_interval)
  end

  defp schedule_optimization_cycle() do
    Process.send_after(self(), :optimization_cycle, @optimization_interval)
  end
end
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc