• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

nshkrdotcom / ElixirScope / 83134b8f55cc7142f7a918aeb2e46c6944bc57c6

28 May 2025 03:15PM UTC coverage: 60.013% (-0.6%) from 60.612%
83134b8f55cc7142f7a918aeb2e46c6944bc57c6

push

github

NSHkr
Performance Optimization & Memory Management 1072 tests, 0 failures, 76 excluded

189 of 445 new or added lines in 2 files covered. (42.47%)

2 existing lines in 2 files now uncovered.

5679 of 9463 relevant lines covered (60.01%)

3649.78 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

79.41
/lib/elixir_scope/ast_repository/memory_manager.ex
1
defmodule ElixirScope.ASTRepository.MemoryManager do
2
  @moduledoc """
3
  Comprehensive memory management for the Enhanced AST Repository.
4
  
5
  Provides intelligent memory monitoring, cleanup, compression, and caching
6
  strategies to handle production-scale projects with 1000+ modules.
7
  
8
  ## Features
9
  
10
  - **Memory Monitoring**: Real-time tracking of repository memory usage
11
  - **Intelligent Cleanup**: Remove stale and unused AST data
12
  - **Data Compression**: Compress infrequently accessed analysis data
13
  - **LRU Caching**: Least Recently Used cache for query optimization
14
  - **Memory Pressure Handling**: Multi-level response to memory constraints
15
  
16
  ## Performance Targets
17
  
18
  - Memory usage: <500MB for 1000 modules
19
  - Query response: <100ms for 95th percentile
20
  - Cache hit ratio: >80% for repeated queries
21
  - Memory cleanup: <10ms per cleanup cycle
22
  
23
  ## Memory Pressure Levels
24
  
25
  1. **Level 1** (80% memory): Clear query caches
26
  2. **Level 2** (90% memory): Compress old analysis data
27
  3. **Level 3** (95% memory): Remove unused module data
28
  4. **Level 4** (98% memory): Emergency cleanup and GC
29
  
30
  ## Examples
31
  
32
      # Start memory monitoring
33
      {:ok, _pid} = MemoryManager.start_link()
34
      
35
      # Monitor memory usage
36
      {:ok, stats} = MemoryManager.monitor_memory_usage()
37
      
38
      # Cleanup unused data
39
      :ok = MemoryManager.cleanup_unused_data(max_age: 3600)
40
      
41
      # Handle memory pressure
42
      :ok = MemoryManager.memory_pressure_handler(:level_2)
43
  """
44
  
45
  use GenServer
46
  require Logger
47
  
48
  alias ElixirScope.ASTRepository.EnhancedRepository
49
  
50
  # Memory management configuration
51
  @memory_check_interval 30_000      # 30 seconds
52
  @cleanup_interval 300_000          # 5 minutes
53
  @compression_interval 600_000      # 10 minutes
54
  
55
  # Cache configuration
56
  @query_cache_ttl 60_000           # 1 minute
57
  @analysis_cache_ttl 300_000       # 5 minutes  
58
  @cpg_cache_ttl 600_000            # 10 minutes
59
  @max_cache_entries 1000
60
  
61
  # Memory pressure thresholds (percentage of available memory)
62
  @memory_pressure_level_1 80
63
  @memory_pressure_level_2 90
64
  @memory_pressure_level_3 95
65
  @memory_pressure_level_4 98
66
  
67
  # ETS tables for caching and monitoring
68
  @query_cache_table :ast_repo_query_cache
69
  @analysis_cache_table :ast_repo_analysis_cache
70
  @cpg_cache_table :ast_repo_cpg_cache
71
  @memory_stats_table :ast_repo_memory_stats
72
  @access_tracking_table :ast_repo_access_tracking
73
  
74
  defstruct [
75
    :memory_stats,
76
    :cache_stats,
77
    :cleanup_stats,
78
    :compression_stats,
79
    :pressure_level,
80
    :last_cleanup,
81
    :last_compression,
82
    :monitoring_enabled
83
  ]
84
  
85
  @type memory_stats :: %{
86
    total_memory: non_neg_integer(),
87
    repository_memory: non_neg_integer(),
88
    cache_memory: non_neg_integer(),
89
    ets_memory: non_neg_integer(),
90
    process_memory: non_neg_integer(),
91
    memory_usage_percent: float(),
92
    available_memory: non_neg_integer()
93
  }
94
  
95
  @type cache_stats :: %{
96
    query_cache_size: non_neg_integer(),
97
    analysis_cache_size: non_neg_integer(),
98
    cpg_cache_size: non_neg_integer(),
99
    total_cache_hits: non_neg_integer(),
100
    total_cache_misses: non_neg_integer(),
101
    cache_hit_ratio: float(),
102
    evictions: non_neg_integer()
103
  }
104
  
105
  @type cleanup_stats :: %{
106
    modules_cleaned: non_neg_integer(),
107
    data_removed_bytes: non_neg_integer(),
108
    last_cleanup_duration: non_neg_integer(),
109
    total_cleanups: non_neg_integer()
110
  }
111
  
112
  @type compression_stats :: %{
113
    modules_compressed: non_neg_integer(),
114
    compression_ratio: float(),
115
    space_saved_bytes: non_neg_integer(),
116
    last_compression_duration: non_neg_integer(),
117
    total_compressions: non_neg_integer()
118
  }
119
  
120
  # GenServer API
121
  
122
  # Starts the memory manager as a singleton registered under the module
  # name. `opts` are forwarded untouched to `init/1`.
  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: __MODULE__)
  end

  # GenServer callback: creates the cache/monitoring ETS tables, arms the
  # periodic timers, and builds the initial state struct.
  #
  # Options: `:monitoring_enabled` (default `true`) controls whether the
  # periodic memory probe actually collects stats.
  def init(opts) do
    init_ets_tables()

    # Arm all recurring timers up front; each handler re-arms itself.
    schedule_memory_check()
    schedule_cleanup()
    schedule_compression()

    state = %__MODULE__{
      memory_stats: %{},
      cache_stats: init_cache_stats(),
      cleanup_stats: init_cleanup_stats(),
      compression_stats: init_compression_stats(),
      pressure_level: :normal,
      last_cleanup: System.monotonic_time(:millisecond),
      last_compression: System.monotonic_time(:millisecond),
      monitoring_enabled: Keyword.get(opts, :monitoring_enabled, true)
    }

    Logger.info("MemoryManager started with monitoring enabled: #{state.monitoring_enabled}")
    {:ok, state}
  end
149
  
150
  # Public API
151
  
152
  @doc """
  Returns a snapshot of the repository's memory consumption.

  The snapshot includes total VM memory, repository- and cache-specific
  usage, ETS/process breakdowns, and the usage percentage used for
  pressure detection.

  Returns `{:ok, memory_stats}` on success, `{:error, reason}` when
  collection fails.

  ## Examples

      {:ok, stats} = MemoryManager.monitor_memory_usage()
      # stats.memory_usage_percent => 45.2
  """
  @spec monitor_memory_usage() :: {:ok, memory_stats()} | {:error, term()}
  def monitor_memory_usage(), do: GenServer.call(__MODULE__, :monitor_memory_usage)

  @doc """
  Removes stale module data and expired cache entries to free memory.

  ## Options

    * `:max_age` - retention window in seconds (default: 3600)
    * `:force` - clean regardless of access recency (default: false)
    * `:dry_run` - report what would be removed without deleting (default: false)

  Returns `:ok`, or `{:error, reason}` if the pass fails.

  ## Examples

      :ok = MemoryManager.cleanup_unused_data(max_age: 3600)
      :ok = MemoryManager.cleanup_unused_data(force: true)
  """
  @spec cleanup_unused_data(keyword()) :: :ok | {:error, term()}
  def cleanup_unused_data(opts \\ []) do
    GenServer.call(__MODULE__, {:cleanup_unused_data, opts}, 30_000)
  end

  @doc """
  Compresses analysis data that is both old and rarely accessed.

  ## Options

    * `:access_threshold` - entries accessed at least this many times are
      left uncompressed (default: 5)
    * `:age_threshold` - minimum seconds since last access (default: 1800)
    * `:compression_level` - compression level 1-9 (default: 6)

  Returns `{:ok, compression_stats}` or `{:error, reason}`.

  ## Examples

      {:ok, stats} = MemoryManager.compress_old_analysis(access_threshold: 3)
  """
  @spec compress_old_analysis(keyword()) :: {:ok, compression_stats()} | {:error, term()}
  def compress_old_analysis(opts \\ []) do
    GenServer.call(__MODULE__, {:compress_old_analysis, opts}, 30_000)
  end

  @doc """
  Configures LRU caching for the given cache type (`:query`, `:analysis`,
  or `:cpg`).

  ## Options

    * `:max_entries` - cache capacity (default: 1000)
    * `:ttl` - entry lifetime in milliseconds (defaults vary per type)
    * `:eviction_policy` - `:lru`, `:lfu`, or `:ttl` (default: `:lru`)

  Returns `:ok` or `{:error, reason}`.

  ## Examples

      :ok = MemoryManager.implement_lru_cache(:query, max_entries: 500, ttl: 30_000)
  """
  @spec implement_lru_cache(atom(), keyword()) :: :ok | {:error, term()}
  def implement_lru_cache(cache_type, opts \\ []) do
    GenServer.call(__MODULE__, {:implement_lru_cache, cache_type, opts})
  end

  @doc """
  Applies the response for a given memory pressure level.

    * `:level_1` - clear query caches (~80% usage)
    * `:level_2` - also compress old analysis data (~90%)
    * `:level_3` - also remove unused module data (~95%)
    * `:level_4` - emergency cleanup plus garbage collection (~98%)

  Returns `:ok` or `{:error, reason}`.

  ## Examples

      :ok = MemoryManager.memory_pressure_handler(:level_2)
  """
  @spec memory_pressure_handler(atom()) :: :ok | {:error, term()}
  def memory_pressure_handler(pressure_level) do
    GenServer.call(__MODULE__, {:memory_pressure_handler, pressure_level}, 60_000)
  end

  @doc """
  Returns every statistics bucket (memory, cache, cleanup, compression)
  along with the current pressure level and monitoring flag.
  """
  @spec get_stats() :: {:ok, map()}
  def get_stats(), do: GenServer.call(__MODULE__, :get_stats)

  @doc """
  Turns periodic memory monitoring on or off.
  """
  @spec set_monitoring(boolean()) :: :ok
  def set_monitoring(enabled), do: GenServer.call(__MODULE__, {:set_monitoring, enabled})

  @doc """
  Forces garbage collection across all live processes in the VM.
  """
  @spec force_gc() :: :ok
  def force_gc(), do: GenServer.call(__MODULE__, :force_gc)
319
  
320
  # Cache API
321
  
322
  @doc """
  Looks up `key` in the cache identified by `cache_type`.

  A fresh hit refreshes the entry's timestamp and bumps its access
  counter, so frequently read entries slide forward in LRU order.
  Expired entries are deleted on read. Any ETS failure is reported as a
  plain `:miss` rather than raising.
  """
  @spec cache_get(atom(), term()) :: {:ok, term()} | :miss
  def cache_get(cache_type, key) do
    table = cache_table_for_type(cache_type)

    case :ets.lookup(table, key) do
      [{^key, value, inserted_at, _hits}] ->
        now = System.monotonic_time(:millisecond)

        if now - inserted_at < cache_ttl_for_type(cache_type) do
          # Fresh hit: bump the access count (slot 4) and slide the
          # timestamp (slot 3) forward so LRU eviction spares this entry.
          :ets.update_counter(table, key, {4, 1})
          :ets.update_element(table, key, {3, System.monotonic_time(:millisecond)})
          {:ok, value}
        else
          # Past its TTL: drop it eagerly and report a miss.
          :ets.delete(table, key)
          :miss
        end

      [] ->
        :miss
    end
  rescue
    _error -> :miss
  end
350
  
351
  @doc """
  Stores `value` under `key` in the cache identified by `cache_type`.

  When the table is already at `@max_cache_entries`, roughly 10% of the
  least recently used rows are evicted first. ETS failures are swallowed
  so a broken cache can never take a caller down; `:ok` is always
  returned.
  """
  @spec cache_put(atom(), term(), term()) :: :ok
  def cache_put(cache_type, key, value) do
    table = cache_table_for_type(cache_type)

    # Make room before inserting when the table is full.
    if :ets.info(table, :size) >= @max_cache_entries do
      evict_lru_entries(table, div(@max_cache_entries, 10))
    end

    # Row layout: {key, value, timestamp, access_count}.
    :ets.insert(table, {key, value, System.monotonic_time(:millisecond), 1})
    :ok
  rescue
    _error -> :ok
  end
373
  
374
  @doc """
  Removes every entry from the cache identified by `cache_type`.

  Always returns `:ok`; a missing or broken table is treated as already
  clear.
  """
  @spec cache_clear(atom()) :: :ok
  def cache_clear(cache_type) do
    cache_type
    |> cache_table_for_type()
    |> :ets.delete_all_objects()

    :ok
  rescue
    _error -> :ok
  end
388
  
389
  # GenServer Callbacks
390
  
391
  # Collects fresh memory statistics and caches them in state.
  def handle_call(:monitor_memory_usage, _from, state) do
    case collect_memory_stats() do
      {:ok, memory_stats} ->
        {:reply, {:ok, memory_stats}, %{state | memory_stats: memory_stats}}

      error ->
        {:reply, error, state}
    end
  end

  # Runs a cleanup pass, timing it and folding the result into the
  # rolling cleanup statistics.
  def handle_call({:cleanup_unused_data, opts}, _from, state) do
    started = System.monotonic_time(:millisecond)

    case perform_cleanup(opts) do
      {:ok, cleanup_result} ->
        finished = System.monotonic_time(:millisecond)

        new_state = %{
          state
          | cleanup_stats:
              update_cleanup_stats(state.cleanup_stats, cleanup_result, finished - started),
            last_cleanup: finished
        }

        {:reply, :ok, new_state}

      error ->
        {:reply, error, state}
    end
  end

  # Runs a compression pass, timing it and folding the result into the
  # rolling compression statistics. Replies with the pass's own stats.
  def handle_call({:compress_old_analysis, opts}, _from, state) do
    started = System.monotonic_time(:millisecond)

    case perform_compression(opts) do
      {:ok, compression_result} ->
        finished = System.monotonic_time(:millisecond)

        new_state = %{
          state
          | compression_stats:
              update_compression_stats(
                state.compression_stats,
                compression_result,
                finished - started
              ),
            last_compression: finished
        }

        {:reply, {:ok, compression_result}, new_state}

      error ->
        {:reply, error, state}
    end
  end

  # Applies runtime cache configuration (currently a downstream no-op).
  def handle_call({:implement_lru_cache, cache_type, opts}, _from, state) do
    case configure_cache(cache_type, opts) do
      :ok -> {:reply, :ok, state}
      error -> {:reply, error, state}
    end
  end

  # Executes the response for the given pressure level and records it.
  def handle_call({:memory_pressure_handler, pressure_level}, _from, state) do
    case handle_memory_pressure(pressure_level) do
      :ok -> {:reply, :ok, %{state | pressure_level: pressure_level}}
      error -> {:reply, error, state}
    end
  end

  # Returns a snapshot of every statistics bucket held in state.
  def handle_call(:get_stats, _from, state) do
    stats = %{
      memory: state.memory_stats,
      cache: state.cache_stats,
      cleanup: state.cleanup_stats,
      compression: state.compression_stats,
      pressure_level: state.pressure_level,
      monitoring_enabled: state.monitoring_enabled
    }

    {:reply, {:ok, stats}, state}
  end

  # Toggles periodic memory monitoring on or off.
  def handle_call({:set_monitoring, enabled}, _from, state) do
    {:reply, :ok, %{state | monitoring_enabled: enabled}}
  end

  # Forces a GC of this process and then of every live process in the VM.
  def handle_call(:force_gc, _from, state) do
    :erlang.garbage_collect()

    # Sweep the whole node; dead PIDs are skipped via the liveness check.
    Process.list()
    |> Enum.filter(&Process.alive?/1)
    |> Enum.each(&:erlang.garbage_collect/1)

    {:reply, :ok, state}
  end
492
  
493
  # Periodic memory probe. Skipped (but always re-armed) while
  # monitoring is disabled.
  def handle_info(:memory_check, state) do
    new_state =
      if state.monitoring_enabled do
        run_memory_check(state)
      else
        state
      end

    schedule_memory_check()
    {:noreply, new_state}
  end

  # Periodic cleanup sweep using the default one-hour retention window.
  def handle_info(:cleanup, state) do
    perform_cleanup(max_age: 3600)
    schedule_cleanup()
    {:noreply, state}
  end

  # Periodic compression sweep using the default thresholds.
  def handle_info(:compression, state) do
    perform_compression(access_threshold: 5, age_threshold: 1800)
    schedule_compression()
    {:noreply, state}
  end

  # Collects stats, reacts to newly detected memory pressure, and
  # returns the updated state; returns the state unchanged (after
  # logging) when collection fails.
  defp run_memory_check(state) do
    case collect_memory_stats() do
      {:ok, memory_stats} ->
        pressure_level = determine_pressure_level(memory_stats.memory_usage_percent)

        # React only when pressure exists and the level changed, so a
        # sustained level does not trigger repeated handling.
        if pressure_level != :normal and pressure_level != state.pressure_level do
          Logger.warning("Memory pressure detected: #{pressure_level} (#{memory_stats.memory_usage_percent}%)")
          handle_memory_pressure(pressure_level)
        end

        %{state | memory_stats: memory_stats, pressure_level: pressure_level}

      {:error, reason} ->
        Logger.error("Memory monitoring failed: #{inspect(reason)}")
        state
    end
  end
539
  
540
  # Private Implementation
541
  
542
  # Creates the named public ETS tables used for caching and bookkeeping.
  #
  # Cache rows are {key, value, timestamp, access_count}; stats rows are
  # {metric, value, timestamp}; access-tracking rows are
  # {module, last_access, access_count}.
  defp init_ets_tables() do
    cache_opts = [:named_table, :public, :set, {:read_concurrency, true}]

    # The three value caches are read-heavy, so enable read concurrency.
    for table <- [@query_cache_table, @analysis_cache_table, @cpg_cache_table] do
      :ets.new(table, cache_opts)
    end

    :ets.new(@memory_stats_table, [:named_table, :public, :set])
    :ets.new(@access_tracking_table, [:named_table, :public, :set])
  end
558
  
559
  # Gathers a point-in-time snapshot of VM and repository memory usage.
  #
  # Returns {:ok, stats_map}, or {:error, {:memory_collection_failed, e}}
  # if anything raises along the way. Each snapshot is also appended to
  # the stats ETS table for historical tracking.
  defp collect_memory_stats() do
    memory_info = :erlang.memory()
    total_memory = Keyword.get(memory_info, :total, 0)
    available_memory = get_available_system_memory()

    # Percentage of (approximate) system memory consumed by this VM;
    # guard against a zero denominator from a failed detection.
    usage_percent =
      case available_memory do
        avail when avail > 0 -> total_memory / avail * 100
        _ -> 0.0
      end

    stats = %{
      total_memory: total_memory,
      repository_memory: calculate_repository_memory(),
      cache_memory: calculate_cache_memory(),
      ets_memory: Keyword.get(memory_info, :ets, 0),
      process_memory: Keyword.get(memory_info, :processes, 0),
      memory_usage_percent: usage_percent,
      available_memory: available_memory
    }

    # Keep the snapshot around for later inspection.
    :ets.insert(
      @memory_stats_table,
      {:memory_stats, stats, System.monotonic_time(:millisecond)}
    )

    {:ok, stats}
  rescue
    error -> {:error, {:memory_collection_failed, error}}
  end
599
  
600
  # Approximate bytes used by the Enhanced Repository's ETS tables.
  # Tables that do not exist (e.g. the repository is not running) simply
  # contribute zero.
  defp calculate_repository_memory() do
    sum_ets_memory([
      :enhanced_ast_repository,
      :runtime_correlator_main,
      :runtime_correlator_context_cache,
      :runtime_correlator_trace_cache
    ])
  end

  # Approximate bytes used by this manager's own cache tables.
  defp calculate_cache_memory() do
    sum_ets_memory([@query_cache_table, @analysis_cache_table, @cpg_cache_table])
  end

  # Shared accumulator previously duplicated in both functions above:
  # sums :ets.info(table, :memory) — reported in words — across
  # `tables`, converting to bytes. Missing tables count as zero.
  defp sum_ets_memory(tables) do
    word_size = :erlang.system_info(:wordsize)

    Enum.reduce(tables, 0, fn table, acc ->
      case :ets.info(table, :memory) do
        :undefined -> acc
        words -> acc + words * word_size
      end
    end)
  end
623
  
624
  # Fallback (8 GiB) when the host's total memory cannot be determined —
  # previously this magic constant was duplicated in three places.
  @default_system_memory 8 * 1024 * 1024 * 1024

  # Best-effort detection of total system memory in bytes.
  #
  # On Linux this reads MemTotal from /proc/meminfo; everywhere else, or
  # when the file is unreadable, it falls back to @default_system_memory.
  # NOTE(review): this reports *total* rather than *free* memory —
  # confirm that is the intended denominator for the usage percentage.
  defp get_available_system_memory() do
    with {:unix, :linux} <- :os.type(),
         {:ok, content} <- File.read("/proc/meminfo") do
      parse_meminfo(content)
    else
      _ -> @default_system_memory
    end
  end

  # Extracts MemTotal (reported in kB) from /proc/meminfo content and
  # converts it to bytes; falls back to the default when absent.
  defp parse_meminfo(content) do
    case Regex.run(~r/MemTotal:\s+(\d+)\s+kB/, content) do
      [_, kb] -> String.to_integer(kb) * 1024
      _ -> @default_system_memory
    end
  end
652
  
653
  # Maps a memory usage percentage onto a pressure level atom.
  # Thresholds are checked from most to least severe; anything below
  # level 1 is :normal.
  defp determine_pressure_level(memory_usage_percent) do
    thresholds = [
      {@memory_pressure_level_4, :level_4},
      {@memory_pressure_level_3, :level_3},
      {@memory_pressure_level_2, :level_2},
      {@memory_pressure_level_1, :level_1}
    ]

    Enum.find_value(thresholds, :normal, fn {threshold, level} ->
      if memory_usage_percent >= threshold, do: level
    end)
  end
662
  
663
  # Escalating responses to memory pressure. Each level includes the
  # remedies of the levels below it; unknown levels are a no-op.
  # Always returns :ok.
  defp handle_memory_pressure(pressure_level) do
    Logger.info("Handling memory pressure: #{pressure_level}")
    apply_pressure_response(pressure_level)
    :ok
  end

  # Level 1: shed the cheapest state first — the query cache.
  defp apply_pressure_response(:level_1) do
    cache_clear(:query)
    Logger.info("Level 1: Cleared query caches")
  end

  # Level 2: additionally compress rarely accessed analysis data.
  defp apply_pressure_response(:level_2) do
    cache_clear(:query)
    perform_compression(access_threshold: 3, age_threshold: 900)
    Logger.info("Level 2: Cleared caches and compressed old analysis")
  end

  # Level 3: drop the analysis cache too and force a cleanup pass.
  defp apply_pressure_response(:level_3) do
    cache_clear(:query)
    cache_clear(:analysis)
    perform_cleanup(max_age: 1800, force: true)
    Logger.info("Level 3: Cleared all caches and removed unused data")
  end

  # Level 4: last resort — clear everything and garbage-collect.
  defp apply_pressure_response(:level_4) do
    cache_clear(:query)
    cache_clear(:analysis)
    cache_clear(:cpg)
    perform_cleanup(max_age: 900, force: true)
    :erlang.garbage_collect()
    Logger.warning("Level 4: Emergency cleanup and GC performed")
  end

  # :normal (or anything unrecognized) requires no action.
  defp apply_pressure_response(_level), do: :ok
700
  
701
  # Executes one cleanup pass.
  #
  # Options: :max_age (seconds, default 3600; invalid values fall back to
  # the default), :force (ignore age and clean every tracked module), and
  # :dry_run (report what would be cleaned without touching anything).
  # Always returns {:ok, result_map}.
  defp perform_cleanup(opts) do
    force = Keyword.get(opts, :force, false)
    dry_run = Keyword.get(opts, :dry_run, false)

    # Sanitize :max_age — anything but a non-negative integer falls back
    # to the one-hour default.
    max_age =
      case Keyword.get(opts, :max_age, 3600) do
        age when is_integer(age) and age >= 0 -> age
        _ -> 3600
      end

    cutoff_time = System.monotonic_time(:second) - max_age
    modules_to_clean = find_modules_to_clean(cutoff_time, force)

    if dry_run do
      {:ok, %{modules_to_clean: length(modules_to_clean), dry_run: true}}
    else
      {modules_cleaned, bytes_removed} = cleanup_modules(modules_to_clean)

      # Drop TTL-expired cache entries while we are at it.
      clean_expired_cache_entries()

      {:ok,
       %{
         modules_cleaned: modules_cleaned,
         data_removed_bytes: bytes_removed,
         dry_run: false
       }}
    end
  end
734
  
735
  # Lists tracked modules whose last access predates `cutoff_time`, or
  # every tracked module when `force` is set.
  defp find_modules_to_clean(cutoff_time, force) do
    for {module, last_access, _access_count} <- :ets.tab2list(@access_tracking_table),
        force or last_access < cutoff_time,
        do: module
  end

  # Removes each module's data, tallying {modules_cleaned, bytes_removed}.
  # Modules that fail to clean are skipped and not counted.
  defp cleanup_modules(modules) do
    Enum.reduce(modules, {0, 0}, fn module, {count, bytes} = acc ->
      case cleanup_module_data(module) do
        {:ok, removed_bytes} -> {count + 1, bytes + removed_bytes}
        {:error, _reason} -> acc
      end
    end)
  end
754
  
755
  # Removes a single module's tracked data, returning {:ok, bytes_freed}
  # or {:error, reason}. Currently only the access-tracking row is
  # deleted; removal from the EnhancedRepository itself is still pending.
  defp cleanup_module_data(module) do
    # Estimate before deleting so we can report how much was freed.
    size_before = estimate_module_size(module)

    :ets.delete(@access_tracking_table, module)

    # In a real implementation this would also remove the module from
    # the repository, e.g. EnhancedRepository.remove_module(module).

    {:ok, size_before}
  rescue
    error -> {:error, error}
  end

  # Rough per-module footprint used for cleanup accounting.
  # NOTE(review): fixed 64 KiB placeholder — replace with a real
  # measurement when one is available.
  defp estimate_module_size(_module) do
    64 * 1024
  end
780
  
781
  # Drops TTL-expired entries from every cache table, one pass each.
  defp clean_expired_cache_entries() do
    now = System.monotonic_time(:millisecond)

    clean_expired_entries(@query_cache_table, now, @query_cache_ttl)
    clean_expired_entries(@analysis_cache_table, now, @analysis_cache_ttl)
    clean_expired_entries(@cpg_cache_table, now, @cpg_cache_ttl)
  end

  # Deletes all rows whose timestamp (element 3) is older than
  # `current_time - ttl`. Uses :ets.select_delete/2 so the scan and the
  # deletes happen in a single table traversal instead of the previous
  # fold plus N individual deletes. Returns the number of rows removed.
  defp clean_expired_entries(table, current_time, ttl) do
    cutoff = current_time - ttl

    # Rows are {key, value, timestamp, access_count}; delete when the
    # timestamp is strictly below the cutoff — the same strict
    # inequality as the original `current_time - timestamp > ttl`.
    :ets.select_delete(table, [{{:_, :_, :"$1", :_}, [{:<, :"$1", cutoff}], [true]}])
  end
803
  
804
  # Executes one compression pass over infrequently accessed modules.
  #
  # Options: :access_threshold (compress below this access count,
  # default 5), :age_threshold (seconds since last access, default 1800),
  # :compression_level (1-9, default 6). Always returns {:ok, stats_map}.
  defp perform_compression(opts) do
    access_threshold = Keyword.get(opts, :access_threshold, 5)
    age_threshold = Keyword.get(opts, :age_threshold, 1800)
    compression_level = Keyword.get(opts, :compression_level, 6)

    cutoff_time = System.monotonic_time(:second) - age_threshold

    {compressed_count, total_savings} =
      cutoff_time
      |> find_compression_candidates(access_threshold)
      |> compress_candidates(compression_level)

    # Simplified ratio: savings relative to savings plus a nominal
    # 1 KiB per compressed module.
    compression_ratio =
      if compressed_count > 0 do
        total_savings / (total_savings + compressed_count * 1024)
      else
        0.0
      end

    {:ok,
     %{
       modules_compressed: compressed_count,
       compression_ratio: compression_ratio,
       space_saved_bytes: total_savings
     }}
  end
830
  
831
  # Modules that are both old (last access before the cutoff) and cold
  # (accessed fewer than `access_threshold` times).
  defp find_compression_candidates(cutoff_time, access_threshold) do
    for {module, last_access, access_count} <- :ets.tab2list(@access_tracking_table),
        last_access < cutoff_time and access_count < access_threshold,
        do: module
  end

  # Compresses each candidate, tallying {modules_compressed, bytes_saved};
  # failed candidates are skipped and not counted.
  defp compress_candidates(candidates, compression_level) do
    Enum.reduce(candidates, {0, 0}, fn module, {count, savings} = acc ->
      case compress_module_data(module, compression_level) do
        {:ok, saved_bytes} -> {count + 1, savings + saved_bytes}
        {:error, _reason} -> acc
      end
    end)
  end

  # Placeholder compression: pretends each module is 64 KiB and shrinks
  # it by 35%, returning {:ok, bytes_saved}.
  # NOTE(review): replace with real :zlib compression of stored AST data.
  defp compress_module_data(_module, _compression_level) do
    original_size = 64 * 1024
    compressed_size = div(original_size * 65, 100)
    {:ok, original_size - compressed_size}
  end
859
  
860
  # Runtime cache (re)configuration hook. Tables are configured once at
  # startup, so for now this stub always succeeds.
  defp configure_cache(_cache_type, _opts), do: :ok

  # Evicts the `count` least recently used rows from `table`, using the
  # per-row timestamp (element 3) as the recency signal.
  defp evict_lru_entries(table, count) do
    table
    |> :ets.tab2list()
    |> Enum.sort_by(fn {_key, _value, timestamp, _hits} -> timestamp end)
    |> Enum.take(count)
    |> Enum.each(fn {key, _value, _timestamp, _hits} -> :ets.delete(table, key) end)
  end

  # Resolves a cache type atom to its backing ETS table; unknown types
  # (including anything other than :query/:analysis/:cpg) fall back to
  # the query cache.
  defp cache_table_for_type(type) do
    case type do
      :analysis -> @analysis_cache_table
      :cpg -> @cpg_cache_table
      _ -> @query_cache_table
    end
  end

  # Resolves a cache type atom to its TTL in milliseconds; unknown types
  # fall back to the query-cache TTL.
  defp cache_ttl_for_type(type) do
    case type do
      :analysis -> @analysis_cache_ttl
      :cpg -> @cpg_cache_ttl
      _ -> @query_cache_ttl
    end
  end
887
  
888
  # Zeroed cache statistics bucket.
  defp init_cache_stats() do
    Map.new(
      query_cache_size: 0,
      analysis_cache_size: 0,
      cpg_cache_size: 0,
      total_cache_hits: 0,
      total_cache_misses: 0,
      cache_hit_ratio: 0.0,
      evictions: 0
    )
  end

  # Zeroed cleanup statistics bucket.
  defp init_cleanup_stats() do
    Map.new(
      modules_cleaned: 0,
      data_removed_bytes: 0,
      last_cleanup_duration: 0,
      total_cleanups: 0
    )
  end

  # Zeroed compression statistics bucket.
  defp init_compression_stats() do
    Map.new(
      modules_compressed: 0,
      compression_ratio: 0.0,
      space_saved_bytes: 0,
      last_compression_duration: 0,
      total_compressions: 0
    )
  end
918
  
919
  # Folds a single cleanup result into the rolling cleanup statistics.
  # Every pass records its duration and bumps the cleanup counter; only
  # a real (non-dry-run) pass accumulates modules/bytes.
  defp update_cleanup_stats(stats, result, duration) do
    base = %{stats | last_cleanup_duration: duration, total_cleanups: stats.total_cleanups + 1}

    case result do
      %{dry_run: false, modules_cleaned: cleaned, data_removed_bytes: bytes} ->
        %{base |
          modules_cleaned: base.modules_cleaned + cleaned,
          data_removed_bytes: base.data_removed_bytes + bytes
        }

      # Dry runs and unexpected result shapes keep only the bookkeeping.
      _dry_run_or_unexpected ->
        base
    end
  end

  # Folds a single compression result into the rolling statistics.
  # Counts and savings accumulate; the ratio reflects the latest pass.
  defp update_compression_stats(stats, result, duration) do
    stats
    |> Map.update!(:modules_compressed, &(&1 + result.modules_compressed))
    |> Map.update!(:space_saved_bytes, &(&1 + result.space_saved_bytes))
    |> Map.update!(:total_compressions, &(&1 + 1))
    |> Map.put(:compression_ratio, result.compression_ratio)
    |> Map.put(:last_compression_duration, duration)
  end
955
  
956
  # Timer helpers — each arms a one-shot message to self(); the handlers
  # re-arm on receipt, which produces the periodic cadence.

  defp schedule_memory_check(),
    do: Process.send_after(self(), :memory_check, @memory_check_interval)

  defp schedule_cleanup(),
    do: Process.send_after(self(), :cleanup, @cleanup_interval)

  defp schedule_compression(),
    do: Process.send_after(self(), :compression, @compression_interval)
967
end 
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc