
getdozer / dozer / build 6012004045

29 Aug 2023 12:32PM UTC · coverage: 76.365% (-0.2%) from 76.528%

push · github · web-flow
feat: `on_event` can subscribe to multiple endpoints at once (#1881)

* fix: Push event was not enabled if `flags` is not present

* feat: `on_event` can subscribe to multiple endpoints at once
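
The fix bullet above appears to correspond to the `enable_on_event` computation in `SimpleOrchestrator::build` further down in this file: when the optional `flags` section is missing from the config, the push-events setting falls back to `default_push_events()` instead of staying disabled. Below is a minimal, self-contained sketch of that defaulting pattern; the local `Flags` struct and the default value of `true` are illustrative assumptions, not the exact `dozer_types::models::flags` definitions.

// Illustrative sketch only: `Flags` and `default_push_events` stand in for the
// real `dozer_types::models::flags` items used by orchestrator.rs below.
#[derive(Clone, Default)]
struct Flags {
    push_events: bool,
}

// Assumption: the fix implies that push events default to enabled.
fn default_push_events() -> bool {
    true
}

// Mirrors the `enable_on_event` computation in `SimpleOrchestrator::build`.
fn push_events_enabled(flags: Option<&Flags>) -> bool {
    flags
        .map(|flags| flags.push_events)
        .unwrap_or_else(default_push_events)
}

fn main() {
    // No `flags` section in the config: push events are enabled by default.
    assert!(push_events_enabled(None));
    // An explicit `push_events: false` still disables them.
    assert!(!push_events_enabled(Some(&Flags { push_events: false })));
}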

78 of 78 new or added lines in 7 files covered. (100.0%)

49014 of 64184 relevant lines covered (76.36%)

48095.22 hits per line

Source file: /dozer-cli/src/simple/orchestrator.rs (file coverage: 59.75%)

use super::executor::{run_dag_executor, Executor};
use crate::errors::OrchestrationError;
use crate::pipeline::PipelineBuilder;
use crate::shutdown::ShutdownReceiver;
use crate::simple::build;
use crate::simple::helper::validate_config;
use crate::utils::{
    get_api_security_config, get_app_grpc_config, get_cache_manager_options,
    get_checkpoint_factory_options, get_executor_options, get_grpc_config, get_rest_config,
    get_storage_config,
};

use crate::{flatten_join_handle, join_handle_map_err};
use dozer_api::auth::{Access, Authorizer};
use dozer_api::grpc::internal::internal_pipeline_server::start_internal_pipeline_server;
use dozer_api::{grpc, rest, CacheEndpoint};
use dozer_cache::cache::LmdbRwCacheManager;
use dozer_cache::dozer_log::home_dir::HomeDir;
use dozer_core::app::AppPipeline;
use dozer_core::dag_schemas::DagSchemas;
use dozer_types::models::flags::default_push_events;
use tokio::select;

use crate::console_helper::get_colored_text;
use crate::console_helper::GREEN;
use crate::console_helper::PURPLE;
use crate::console_helper::RED;
use dozer_core::errors::ExecutionError;
use dozer_ingestion::connectors::{get_connector, SourceSchema, TableInfo};
use dozer_sql::pipeline::builder::statement_to_pipeline;
use dozer_sql::pipeline::errors::PipelineError;
use dozer_types::crossbeam::channel::{self, Sender};
use dozer_types::indicatif::{MultiProgress, ProgressDrawTarget};
use dozer_types::log::info;
use dozer_types::models::config::Config;
use dozer_types::tracing::error;
use futures::stream::FuturesUnordered;
use futures::{FutureExt, StreamExt, TryFutureExt};
use metrics::{describe_counter, describe_histogram};
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;

use std::sync::Arc;
use std::thread;
use tokio::runtime::Runtime;
use tokio::sync::broadcast;

#[derive(Clone)]
pub struct SimpleOrchestrator {
    pub config: Config,
    pub runtime: Arc<Runtime>,
    pub multi_pb: MultiProgress,
}

impl SimpleOrchestrator {
    pub fn new(config: Config, runtime: Arc<Runtime>, enable_progress: bool) -> Self {
        let progress_draw_target = if enable_progress && atty::is(atty::Stream::Stderr) {
            ProgressDrawTarget::stderr()
        } else {
            ProgressDrawTarget::hidden()
        };

        Self {
            config,
            runtime,
            multi_pb: MultiProgress::with_draw_target(progress_draw_target),
        }
    }

    pub fn run_api(&mut self, shutdown: ShutdownReceiver) -> Result<(), OrchestrationError> {
        describe_histogram!(
            dozer_api::API_LATENCY_HISTOGRAM_NAME,
            "The api processing latency in seconds"
        );
        describe_counter!(
            dozer_api::API_REQUEST_COUNTER_NAME,
            "Number of requests processed by the api"
        );
        self.runtime.block_on(async {
            let mut futures = FuturesUnordered::new();

            // Open `RoCacheEndpoint`s. Streaming operations if necessary.
            let flags = self.config.flags.clone().unwrap_or_default();
            let (operations_sender, operations_receiver) = if flags.dynamic {
                let (sender, receiver) = broadcast::channel(16);
                (Some(sender), Some(receiver))
            } else {
                (None, None)
            };

            let internal_grpc_config = get_app_grpc_config(&self.config);
            let app_server_addr = format!(
                "http://{}:{}",
                internal_grpc_config.host, internal_grpc_config.port
            );
            let cache_manager = Arc::new(
                LmdbRwCacheManager::new(get_cache_manager_options(&self.config))
                    .map_err(OrchestrationError::CacheInitFailed)?,
            );
            let mut cache_endpoints = vec![];
            for endpoint in &self.config.endpoints {
                let (cache_endpoint, handle) = select! {
                    // If we're shutting down, the cache endpoint will fail to connect
                    _shutdown_future = shutdown.create_shutdown_future() => return Ok(()),
                    result = CacheEndpoint::new(
                        app_server_addr.clone(),
                        &*cache_manager,
                        endpoint.clone(),
                        Box::pin(shutdown.create_shutdown_future()),
                        operations_sender.clone(),
                        Some(self.multi_pb.clone()),
                    ) => result?
                };
                let cache_name = endpoint.name.clone();
                futures.push(flatten_join_handle(join_handle_map_err(handle, move |e| {
                    if e.is_map_full() {
                        OrchestrationError::CacheFull(cache_name)
                    } else {
                        OrchestrationError::CacheBuildFailed(cache_name, e)
                    }
                })));
                cache_endpoints.push(Arc::new(cache_endpoint));
            }

            // Initialize API Server
            let rest_config = get_rest_config(&self.config);
            let rest_handle = if rest_config.enabled {
                let security = get_api_security_config(&self.config).cloned();
                let cache_endpoints_for_rest = cache_endpoints.clone();
                let shutdown_for_rest = shutdown.create_shutdown_future();
                let api_server = rest::ApiServer::new(rest_config, security);
                let api_server = api_server
                    .run(cache_endpoints_for_rest, shutdown_for_rest)
                    .map_err(OrchestrationError::ApiInitFailed)?;
                tokio::spawn(api_server.map_err(OrchestrationError::RestServeFailed))
            } else {
                tokio::spawn(async move { Ok::<(), OrchestrationError>(()) })
            };

            // Initialize gRPC Server
            let grpc_config = get_grpc_config(&self.config);
            let grpc_handle = if grpc_config.enabled {
                let api_security = get_api_security_config(&self.config).cloned();
                let grpc_server = grpc::ApiServer::new(grpc_config, api_security, flags);
                let shutdown = shutdown.create_shutdown_future();
                let grpc_server = grpc_server
                    .run(cache_endpoints, shutdown, operations_receiver)
                    .await
                    .map_err(OrchestrationError::ApiInitFailed)?;
                tokio::spawn(async move {
                    grpc_server
                        .await
                        .map_err(OrchestrationError::GrpcServeFailed)
                })
            } else {
                tokio::spawn(async move { Ok::<(), OrchestrationError>(()) })
            };

            futures.push(flatten_join_handle(rest_handle));
            futures.push(flatten_join_handle(grpc_handle));

            while let Some(result) = futures.next().await {
                result?;
            }

            Ok::<(), OrchestrationError>(())
        })?;

        Ok(())
    }

    pub fn run_apps(
        &mut self,
        shutdown: ShutdownReceiver,
        api_notifier: Option<Sender<bool>>,
    ) -> Result<(), OrchestrationError> {
        let home_dir = HomeDir::new(self.config.home_dir.clone(), self.config.cache_dir.clone());
        let executor = self.runtime.block_on(Executor::new(
            &home_dir,
            &self.config.connections,
            &self.config.sources,
            self.config.sql.as_deref(),
            &self.config.endpoints,
            get_checkpoint_factory_options(&self.config),
            self.multi_pb.clone(),
        ))?;
        let dag_executor = self.runtime.block_on(executor.create_dag_executor(
            &self.runtime,
            get_executor_options(&self.config),
            shutdown.clone(),
            self.config.flags.clone().unwrap_or_default(),
        ))?;

        let app_grpc_config = get_app_grpc_config(&self.config);
        let internal_server_future = self
            .runtime
            .block_on(start_internal_pipeline_server(
                executor.endpoint_and_logs().to_vec(),
                &app_grpc_config,
                shutdown.create_shutdown_future(),
            ))
            .map_err(OrchestrationError::InternalServerFailed)?;

        if let Some(api_notifier) = api_notifier {
            api_notifier
                .send(true)
                .expect("Failed to notify API server");
        }

        let pipeline_future = self
            .runtime
            .spawn_blocking(move || run_dag_executor(dag_executor, shutdown.get_running_flag()));

        let mut futures = FuturesUnordered::new();
        futures.push(
            internal_server_future
                .map_err(OrchestrationError::GrpcServeFailed)
                .boxed(),
        );
        futures.push(flatten_join_handle(pipeline_future).boxed());

        self.runtime.block_on(async move {
            while let Some(result) = futures.next().await {
                result?;
            }
            Ok(())
        })
    }

    #[allow(clippy::type_complexity)]
    pub fn list_connectors(
        &self,
    ) -> Result<HashMap<String, (Vec<TableInfo>, Vec<SourceSchema>)>, OrchestrationError> {
        self.runtime.block_on(async {
            let mut schema_map = HashMap::new();
            for connection in &self.config.connections {
                let connector = get_connector(connection.clone())?;
                let schema_tuples = connector.list_all_schemas().await?;
                schema_map.insert(connection.name.clone(), schema_tuples);
            }

            Ok(schema_map)
        })
    }

    pub fn generate_token(&self) -> Result<String, OrchestrationError> {
        if let Some(api_config) = &self.config.api {
            if let Some(api_security) = &api_config.api_security {
                match api_security {
                    dozer_types::models::api_security::ApiSecurity::Jwt(secret) => {
                        let auth = Authorizer::new(secret, None, None);
                        let token = auth
                            .generate_token(Access::All, None)
                            .map_err(OrchestrationError::GenerateTokenFailed)?;
                        return Ok(token);
                    }
                }
            }
        }
        Err(OrchestrationError::MissingSecurityConfig)
    }

    pub fn build(
        &mut self,
        force: bool,
        shutdown: ShutdownReceiver,
    ) -> Result<(), OrchestrationError> {
        let home_dir = HomeDir::new(self.config.home_dir.clone(), self.config.cache_dir.clone());

        info!(
            "Initiating app: {}",
            get_colored_text(&self.config.app_name, PURPLE)
        );
        if force {
            self.clean()?;
        }
        validate_config(&self.config)?;

        // Calculate schemas.
        let endpoint_and_logs = self
            .config
            .endpoints
            .iter()
            // We're not really going to run the pipeline, so we don't create logs.
            .map(|endpoint| (endpoint.clone(), None))
            .collect();
        let builder = PipelineBuilder::new(
            &self.config.connections,
            &self.config.sources,
            self.config.sql.as_deref(),
            endpoint_and_logs,
            self.multi_pb.clone(),
            self.config.flags.clone().unwrap_or_default(),
        );
        let dag = self
            .runtime
            .block_on(builder.build(&self.runtime, shutdown))?;
        // Populate schemas.
        let dag_schemas = DagSchemas::new(dag)?;

        // Get current contract.
        let enable_token = self
            .config
            .api
            .as_ref()
            .map(|api| api.api_security.is_some())
            .unwrap_or(false);
        let enable_on_event = self
            .config
            .flags
            .as_ref()
            .map(|flags| flags.push_events)
            .unwrap_or_else(default_push_events);
        let contract = build::Contract::new(
            &dag_schemas,
            &self.config.connections,
            &self.config.endpoints,
            enable_token,
            enable_on_event,
        )?;

        // Run build
        let storage_config = get_storage_config(&self.config);
        self.runtime
            .block_on(build::build(&home_dir, &contract, &storage_config))?;

        Ok(())
    }

    // Cleaning the entire folder as there will be inconsistencies
    // between pipeline, cache and generated proto files.
    pub fn clean(&mut self) -> Result<(), OrchestrationError> {
        let cache_dir = PathBuf::from(self.config.cache_dir.clone());
        if cache_dir.exists() {
            fs::remove_dir_all(&cache_dir)
                .map_err(|e| ExecutionError::FileSystemError(cache_dir, e))?;
        };

        let home_dir = PathBuf::from(self.config.home_dir.clone());
        if home_dir.exists() {
            fs::remove_dir_all(&home_dir)
                .map_err(|e| ExecutionError::FileSystemError(home_dir, e))?;
        };

        Ok(())
    }

    pub fn run_all(&mut self, shutdown: ShutdownReceiver) -> Result<(), OrchestrationError> {
        let mut dozer_api = self.clone();

        let (tx, rx) = channel::unbounded::<bool>();

        self.build(false, shutdown.clone())?;

        let mut dozer_pipeline = self.clone();
        let pipeline_shutdown = shutdown.clone();
        let pipeline_thread =
            thread::spawn(move || dozer_pipeline.run_apps(pipeline_shutdown, Some(tx)));

        // Wait for pipeline to initialize caches before starting api server
        if rx.recv().is_err() {
            // This means the pipeline thread returned before sending a message. Either an error happened or it panicked.
            return match pipeline_thread.join() {
                Ok(Err(e)) => Err(e),
                Ok(Ok(())) => panic!("An error must have happened"),
                Err(e) => {
                    std::panic::panic_any(e);
                }
            };
        }

        dozer_api.run_api(shutdown)?;

        // wait for pipeline thread to shutdown gracefully
        pipeline_thread.join().unwrap()
    }
}

pub fn validate_sql(sql: String) -> Result<(), PipelineError> {
    statement_to_pipeline(&sql, &mut AppPipeline::new_with_default_flags(), None).map_or_else(
        |e| {
            error!(
                "[sql][{}] Transforms validation error: {}",
                get_colored_text("X", RED),
                e
            );
            Err(e)
        },
        |_| {
            info!(
                "[sql][{}]  Transforms validation completed",
                get_colored_text("✓", GREEN)
            );
            Ok(())
        },
    )
}
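
A note on the task-supervision pattern in `run_api` and `run_apps` above: both methods spawn their servers as tasks, flatten the resulting `JoinHandle`s, and drain a `FuturesUnordered` so that the first failing task aborts the whole `block_on`. The standalone sketch below reproduces that shape with plain tokio and futures; the local `flatten` function is only an assumption about what the crate's `flatten_join_handle` helper does, since its definition is not part of this file.

// Standalone sketch of the task-supervision pattern used by `run_api` and
// `run_apps` above: spawn servers as tasks, flatten their JoinHandles, and
// drain a FuturesUnordered so the first error short-circuits the loop.
// `flatten` is an assumed stand-in for `flatten_join_handle`, which lives
// elsewhere in the crate and is not shown in this file.
use futures::stream::{FuturesUnordered, StreamExt};
use tokio::task::JoinHandle;

async fn flatten(handle: JoinHandle<Result<(), String>>) -> Result<(), String> {
    match handle.await {
        Ok(result) => result,                           // the task ran to completion
        Err(join_error) => Err(join_error.to_string()), // the task panicked or was aborted
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let mut futures = FuturesUnordered::new();
    futures.push(flatten(tokio::spawn(async { Ok(()) })));
    futures.push(flatten(tokio::spawn(async {
        Err("grpc server failed".to_string())
    })));

    // Mirrors the `while let Some(result) = futures.next().await { result?; }`
    // loops above: the first Err stops the drain and propagates out of main.
    while let Some(result) = futures.next().await {
        result?;
    }
    Ok(())
}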